source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
vla_crash.c | // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
int a;
// CHECK-LABEL: foo
// foo: declares two VLA-typed pointers whose element counts come from the
// global 'a', then uses them inside a serialized (if(0)) parallel region.
// The CHECK lines verify that the VLA sizes are passed to the outlined
// function as extra i64 arguments alongside the captured pointers.
void foo() {
int(*b)[a];
int *(**c)[a];
// CHECK: [[B:%.+]] = alloca i32*,
// CHECK: [[C:%.+]] = alloca i32***,
// CHECK: @__kmpc_global_thread_num
// CHECK: call void @__kmpc_serialized_parallel
// CHECK: call void [[OUTLINED:@[^(]+]](i32* %{{[^,]+}}, i32* %{{[^,]+}}, i64 %{{[^,]+}}, i32** [[B]], i64 %{{[^,]+}}, i32**** [[C]])
// CHECK: call void @__kmpc_end_serialized_parallel
// CHECK: ret void
#pragma omp parallel if (0)
b[0][0] = c[0][a][0][a];
}
// CHECK: define internal void [[OUTLINED]](i32* {{[^,]+}}, i32* {{[^,]+}}, i64 {{[^,]+}}, i32** {{[^,]+}}, i64 {{[^,]+}}, i32**** {{[^,]+}})
// CHECK-LABEL: bar
// bar: initializes a VLA-typed pointer from an incompatible pointer type
// (diagnosed by -verify) and compares it inside a serialized parallel
// region; checks the captured VLA pointer and bound are forwarded to the
// outlined function without crashing codegen.
void bar(int n, int *a) {
// CHECK: [[N:%.+]] = alloca i32,
// CHECK: [[A:%.+]] = alloca i32*,
// CHECK: [[P:%.+]] = alloca i32*,
// CHECK: @__kmpc_global_thread_num
// CHECK: [[BC:%.+]] = bitcast i32** [[A]] to i32*
// CHECK: store i32* [[BC]], i32** [[P]],
// CHECK: call void @__kmpc_serialized_parallel
// CHECK: call void [[OUTLINED:@[^(]+]](i32* %{{[^,]+}}, i32* %{{[^,]+}}, i64 %{{[^,]+}}, i32** [[P]], i32** [[A]])
// CHECK: call void @__kmpc_end_serialized_parallel
// CHECK: ret void
// expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}}
int(*p)[n] = &a;
#pragma omp parallel if(0)
// expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}}
if (p == &a) {
}
}
// CHECK: define internal void [[OUTLINED]](i32* {{[^,]+}}, i32* {{[^,]+}}, i64 {{[^,]+}}, i32** {{[^,]+}}, i32** {{[^,]+}})
|
parallelsections.c | #include <omp.h>
#include <assert.h>
/* Verifies an OpenMP "parallel sections" construct with a sum reduction:
 * three sections each accumulate a disjoint range of 1..999 into the
 * reduction variable, and the combined result must equal the closed-form
 * sum plus the initial seed of 7. */
int main()
{
   int total = 7;   /* reduction variable, deliberately seeded non-zero */
   int expected;
   int idx;
#pragma omp parallel sections reduction(+:total) private(idx) if(1)
   {
#pragma omp section
      {
         for (idx = 1; idx < 400; idx++)
            total += idx;
      }
#pragma omp section
      {
         for (idx = 400; idx < 700; idx++)
            total += idx;
      }
#pragma omp section
      {
         for (idx = 700; idx < 1000; idx++)
            total += idx;
      }
   } /* end of section reduction */
   expected = (999 * 1000) / 2 + 7;
   assert(expected == total);
   return 0;
}
|
semi_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.13 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
/* Map a stencil offset indexRAP (each component in {-1,0,1}) to a linear
 * rank in [0, 3^3): a component of -1 is re-encoded as 2, giving a base-3
 * encoding rank = i + 3*j + 9*k.  Inverse of hypre_InverseMapRAPMarker. */
#define hypre_MapRAPMarker(indexRAP, rank) \
{ \
   HYPRE_Int imacro,jmacro,kmacro; \
   imacro = hypre_IndexX(indexRAP); \
   jmacro = hypre_IndexY(indexRAP); \
   kmacro = hypre_IndexZ(indexRAP); \
   if (imacro==-1) imacro=2; \
   if (jmacro==-1) jmacro=2; \
   if (kmacro==-1) kmacro=2; \
   rank = imacro + 3*jmacro + 9*kmacro; \
}
/* Decode a linear rank back into a stencil offset indexRAP: extract the
 * base-3 digits and map a digit of 2 back to the offset -1.  Inverse of
 * hypre_MapRAPMarker. */
#define hypre_InverseMapRAPMarker(rank, indexRAP) \
{ \
   HYPRE_Int imacro,ijmacro,jmacro,kmacro; \
   ijmacro = (rank%9); \
   imacro = (ijmacro%3); \
   jmacro = (ijmacro-imacro)/3; \
   kmacro = (rank-3*jmacro-imacro)/9; \
   if (imacro==2) imacro=-1; \
   if (jmacro==2) jmacro=-1; \
   if (kmacro==2) kmacro=-1; \
   hypre_SetIndex(indexRAP,imacro,jmacro,kmacro); \
}
/*--------------------------------------------------------------------------
* Sets up new coarse grid operator stucture.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SemiCreateRAPOp
 *
 * Symbolically forms the stencil of the coarse-grid operator RAP for
 * semicoarsening in direction cdir and returns a newly created (empty)
 * hypre_StructMatrix on coarse_grid with that stencil.  The stencil is
 * derived from A's stencil alone; R, P and P_stored_as_transpose are not
 * examined here (values are filled in later by hypre_SemiBuildRAP).
 *--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_SemiCreateRAPOp( hypre_StructMatrix *R,
                       hypre_StructMatrix *A,
                       hypre_StructMatrix *P,
                       hypre_StructGrid   *coarse_grid,
                       HYPRE_Int           cdir,
                       HYPRE_Int           P_stored_as_transpose )
{
   hypre_StructMatrix  *RAP;
   hypre_Index         *RAP_stencil_shape;
   hypre_StructStencil *RAP_stencil;
   HYPRE_Int            RAP_stencil_size;
   HYPRE_Int            dim;
   HYPRE_Int            RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
   HYPRE_Int           *not_cdirs;
   hypre_StructStencil *A_stencil;
   HYPRE_Int            A_stencil_size;
   hypre_Index         *A_stencil_shape;
   hypre_Index          indexR;
   hypre_Index          indexRA;
   hypre_Index          indexRAP;
   HYPRE_Int            Rloop, Aloop;
   HYPRE_Int            j, i;
   HYPRE_Int            d;
   HYPRE_Int            stencil_rank;
   HYPRE_Int           *RAP_marker;
   HYPRE_Int            RAP_marker_size;
   HYPRE_Int            RAP_marker_rank;
   A_stencil = hypre_StructMatrixStencil(A);
   dim = hypre_StructStencilDim(A_stencil);
   A_stencil_size = hypre_StructStencilSize(A_stencil);
   A_stencil_shape = hypre_StructStencilShape(A_stencil);
   /*-----------------------------------------------------------------------
    * Allocate RAP_marker array used to determine which offsets are
    * present in RAP. Initialized to zero indicating no offsets present.
    *-----------------------------------------------------------------------*/
   RAP_marker_size = 1;
   for (i = 0; i < dim; i++)
   {
      RAP_marker_size *= 3;   /* one slot per offset in {-1,0,1}^dim */
   }
   RAP_marker = hypre_CTAlloc(HYPRE_Int, RAP_marker_size);
   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/
   hypre_ClearIndex(indexR);
   hypre_ClearIndex(indexRA);
   hypre_ClearIndex(indexRAP);
   stencil_rank = 0;
   /*-----------------------------------------------------------------------
    * Calculate RAP stencil by symbolic computation of triple matrix
    * product RAP. We keep track of index to update RAP_marker.
    *-----------------------------------------------------------------------*/
   for (Rloop = -1; Rloop < 2; Rloop++)
   {
      hypre_IndexD(indexR,cdir) = Rloop;
      for (Aloop = 0; Aloop < A_stencil_size; Aloop++)
      {
         for (d = 0; d < dim; d++)
         {
            hypre_IndexD(indexRA, d) = hypre_IndexD(indexR, d) +
               hypre_IndexD(A_stencil_shape[Aloop], d);
         }
         /*-----------------------------------------------------------------
          * If RA part of the path lands on C point, then P part of path
          * stays at the C point. Divide by 2 to yield to coarse index.
          *-----------------------------------------------------------------*/
         if ((hypre_IndexD(indexRA, cdir) % 2) == 0)
         {
            hypre_CopyIndex(indexRA, indexRAP);
            hypre_IndexD(indexRAP,cdir) /= 2;
            hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
            RAP_marker[RAP_marker_rank]++;
         }
         /*-----------------------------------------------------------------
          * If RA part of the path lands on F point, then P part of path
          * move +1 and -1 in cdir. Divide by 2 to yield to coarse index.
          *-----------------------------------------------------------------*/
         else
         {
            hypre_CopyIndex(indexRA, indexRAP);
            hypre_IndexD(indexRAP,cdir) += 1;
            hypre_IndexD(indexRAP,cdir) /= 2;
            hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
            RAP_marker[RAP_marker_rank]++;
            hypre_CopyIndex(indexRA, indexRAP);
            hypre_IndexD(indexRAP,cdir) -= 1;
            hypre_IndexD(indexRAP,cdir) /= 2;
            hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
            RAP_marker[RAP_marker_rank]++;
         }
      }
   }
   /*-----------------------------------------------------------------------
    * For symmetric A, we zero out some entries of RAP_marker to yield
    * the stencil with the proper stored entries.
    * The set S of stored off diagonal entries are such that paths in
    * RAP resulting in a contribution to a entry of S arise only from
    * diagonal entries of A or entries contained in S.
    *
    * In 1d
    * =====
    * cdir = 0
    * (i) in S if
    * i<0.
    *
    * In 2d
    * =====
    * cdir = 1          cdir = 0
    * (i,j) in S if     (i,j) in S if
    * i<0,              j<0,
    * or i=0 & j<0.     or j=0 & i<0.
    *
    * In 3d
    * =====
    * cdir = 2            cdir = 1            cdir = 0
    * (i,j,k) in S if     (i,j,k) in S if     (i,j,k) in S if
    * i<0,                k<0,                j<0,
    * or i=0 & j<0,       or k=0 & i<0,       j=0 & k<0,
    * or i=j=0 & k<0.     or k=i=0 & j<0.     j=k=0 & i<0.
    *-----------------------------------------------------------------------*/
   if (hypre_StructMatrixSymmetric(A))
   {
      if (dim > 1)
      {
         not_cdirs = hypre_CTAlloc(HYPRE_Int, dim-1);
      }
      /* not_cdirs lists the non-coarsened directions; the loop below is
       * empty when dim == 1, so not_cdirs is never touched in that case. */
      for (d = 1; d < dim; d++)
      {
         not_cdirs[d-1] = (dim+cdir-d) % dim;
      }
      hypre_ClearIndex(indexRAP);
      hypre_IndexD(indexRAP, cdir) = 1;
      hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
      RAP_marker[RAP_marker_rank] = 0;
      if (dim > 1)
      {
         hypre_ClearIndex(indexRAP);
         hypre_IndexD(indexRAP,not_cdirs[0]) = 1;
         for (i = -1; i < 2; i++)
         {
            hypre_IndexD(indexRAP,cdir) = i;
            hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
            RAP_marker[RAP_marker_rank] = 0;
         }
      }
      if (dim > 2)
      {
         hypre_ClearIndex(indexRAP);
         hypre_IndexD(indexRAP,not_cdirs[1]) = 1;
         for (i = -1; i < 2; i++)
         {
            hypre_IndexD(indexRAP,not_cdirs[0]) = i;
            for (j = -1; j < 2; j++)
            {
               hypre_IndexD(indexRAP,cdir) = j;
               hypre_MapRAPMarker(indexRAP,RAP_marker_rank);
               RAP_marker[RAP_marker_rank] = 0;
            }
         }
      }
      if (dim > 1)
      {
         hypre_TFree(not_cdirs);
      }
   }
   /* Count the surviving offsets to size the coarse stencil. */
   RAP_stencil_size= 0;
   for (i = 0; i < RAP_marker_size; i++)
   {
      if ( RAP_marker[i] != 0 )
      {
         RAP_stencil_size++;
      }
   }
   /* Decode each surviving marker rank back into a stencil offset. */
   RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
   stencil_rank= 0;
   for (i = 0; i < RAP_marker_size; i++)
   {
      if ( RAP_marker[i] != 0 )
      {
         hypre_InverseMapRAPMarker(i,RAP_stencil_shape[stencil_rank]);
         stencil_rank++;
      }
   }
   RAP_stencil = hypre_StructStencilCreate(dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);
   hypre_StructStencilDestroy(RAP_stencil);
   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);
   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);
   hypre_TFree(RAP_marker);
   return RAP;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_SemiBuildRAP
 *
 * Numerically computes the entries of the coarse operator RAP (created by
 * hypre_SemiCreateRAPOp) by accumulating, for every stored coarse stencil
 * entry with a matching entry in A, the five restriction/interpolation
 * path products through the semicoarsened direction cdir.  Finally, when
 * the coarse grid is periodic with period 1 in cdir, the +/-1 entries are
 * collapsed onto the corresponding 0-offset entries.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SemiBuildRAP( hypre_StructMatrix *A,
                    hypre_StructMatrix *P,
                    hypre_StructMatrix *R,
                    HYPRE_Int           cdir,
                    hypre_Index         cindex,
                    hypre_Index         cstride,
                    HYPRE_Int           P_stored_as_transpose,
                    hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_StructStencil  *coarse_stencil;
   HYPRE_Int             coarse_stencil_size;
   hypre_Index          *coarse_stencil_shape;
   HYPRE_Int            *coarse_symm_elements;
   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;
   HYPRE_Int             fi, ci;
   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;
   double               *pa, *pb;
   double               *ra, *rb;
   double               *a_ptr;
   double               *rap_ptrS, *rap_ptrU, *rap_ptrD;
   HYPRE_Int             symm_path_multiplier;
   HYPRE_Int             iA, iAp;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iPp;
   HYPRE_Int             iR;
   HYPRE_Int             COffsetA;
   HYPRE_Int             COffsetP;
   HYPRE_Int             AOffsetP;
   HYPRE_Int             RAPloop;
   HYPRE_Int             diag;
   HYPRE_Int             dim;
   HYPRE_Int             d;
   double                zero = 0.0;
   coarse_stencil = hypre_StructMatrixStencil(RAP);
   coarse_stencil_size = hypre_StructStencilSize(coarse_stencil);
   coarse_symm_elements = hypre_StructMatrixSymmElements(RAP);
   coarse_stencil_shape = hypre_StructStencilShape(coarse_stencil);
   dim = hypre_StructStencilDim(coarse_stencil);
   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);
   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);
   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);
   /*-----------------------------------------------------------------
    * Loop over boxes to compute entries of RAP
    *-----------------------------------------------------------------*/
   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* Advance fi until the fine box matches this coarse box's id. */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }
      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
      hypre_BoxGetSize(cgrid_box, loop_size);
      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
      R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pa is pointer for weight for f-point above c-point
       * pb is pointer for weight for f-point below c-point
       *
       * pa "down"             pb "up"
       *
       *    C
       *
       *    |
       *    v
       *
       *    F                     F
       *
       *                          ^
       *                          |
       *
       *                          C
       *
       *-----------------------------------------------------------------*/
      hypre_ClearIndex(index);
      if (P_stored_as_transpose)
      {
         hypre_IndexD(index, cdir) = 1;
         pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
         hypre_IndexD(index, cdir) = -1;
         pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
      }
      else
      {
         hypre_IndexD(index, cdir) = -1;
         pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
         hypre_IndexD(index, cdir) = 1;
         pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
            hypre_BoxOffsetDistance(P_dbox, index);
      }
      /*-----------------------------------------------------------------
       * Extract pointers for restriction operator:
       * ra is pointer for weight for f-point above c-point
       * rb is pointer for weight for f-point below c-point
       *
       * rb "down"             ra "up"
       *
       *    F
       *
       *    |
       *    v
       *
       *    C                     C
       *
       *                          ^
       *                          |
       *
       *                          F
       *
       *-----------------------------------------------------------------*/
      hypre_ClearIndex(index);
      if (P_stored_as_transpose)
      {
         hypre_IndexD(index, cdir) = 1;
         ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
         hypre_IndexD(index, cdir) = -1;
         rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
      }
      else
      {
         hypre_IndexD(index, cdir) = -1;
         ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
         hypre_IndexD(index, cdir) = 1;
         /* NOTE(review): this offset is computed from P_dbox even though
          * the pointer is extracted from R; the parallel P case above uses
          * P's own data box.  Confirm R_dbox was not intended here (it is
          * only equivalent when R and P share a data space). */
         rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
            hypre_BoxOffsetDistance(P_dbox, index);
      }
      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoops below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets (and those defined later in the switch statement) are
       * used in referring to data associated with other points.
       *-----------------------------------------------------------------*/
      hypre_ClearIndex(index);
      hypre_IndexD(index, cdir) = 1;
      COffsetA = hypre_BoxOffsetDistance(A_dbox,index);
      COffsetP = hypre_BoxOffsetDistance(P_dbox,index);
      /*-----------------------------------------------------------------
       * Entries in RAP are calculated by accumulation, must first
       * zero out entries.
       *-----------------------------------------------------------------*/
      for (RAPloop = 0; RAPloop < coarse_stencil_size; RAPloop++)
      {
         if (coarse_symm_elements[RAPloop] == -1)
         {
            rap_ptrS = hypre_StructMatrixBoxData(RAP, ci, RAPloop);
            hypre_BoxLoop1Begin(hypre_StructMatrixDim(A), loop_size,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(iAc)
            {
               rap_ptrS[iAc] = zero;
            }
            hypre_BoxLoop1End(iAc);
         }
      }
      /*-----------------------------------------------------------------
       * Computational loop. Written as a loop over stored entries of
       * RAP. We then get the pointer (a_ptr) for the same index in A.
       * If it exists, we then calculate all RAP paths involving this
       * entry of A.
       *-----------------------------------------------------------------*/
      for (RAPloop = 0; RAPloop < coarse_stencil_size; RAPloop++)
      {
         if (coarse_symm_elements[RAPloop] == -1)
         {
            /*-------------------------------------------------------------
             * Get pointer for A that corresponds to the current RAP index.
             * If pointer is non-null, i.e. there is a corresponding entry
             * in A, compute paths.
             *-------------------------------------------------------------*/
            hypre_CopyIndex(coarse_stencil_shape[RAPloop], index);
            a_ptr = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
            if (a_ptr != NULL)
            {
               switch (hypre_IndexD(index, cdir))
               {
                  /*-----------------------------------------------------
                   * If A stencil index is 0 in coarsened direction, need
                   * to calculate (r,p) pairs (stay,stay) (up,up) (up,down)
                   * (down,up) and (down,down). Paths 1,3 & 4 {(s,s),(u,d),
                   * (d,u)} yield contributions to RAP with the same stencil
                   * index as A. Path 2 (u,u) contributes to RAP with
                   * index +1 in coarsened direction. Path 5 (d,d)
                   * contributes to RAP with index -1 in coarsened
                   * direction.
                   *-----------------------------------------------------*/
                  case 0:
                     hypre_IndexD(index,cdir) = 1;
                     rap_ptrU = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     hypre_IndexD(index,cdir) = -1;
                     rap_ptrD = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     hypre_IndexD(index,cdir) = 0;
                     AOffsetP = hypre_BoxOffsetDistance(P_dbox, index);
                     rap_ptrS = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     /* diag == 0 detects the all-zero (diagonal) offset. */
                     diag = 0;
                     for (d = 0; d < dim; d++)
                     {
                        diag += hypre_IndexD(index,d) * hypre_IndexD(index,d);
                     }
                     if (diag == 0 && hypre_StructMatrixSymmetric(RAP))
                     {
                        /*--------------------------------------------------
                         * If A stencil index is (0,0,0) and RAP is symmetric,
                         * must not calculate (up,up) path. It's symmetric
                         * to the (down,down) path and calculating both paths
                         * incorrectly doubles the contribution. Additionally
                         * the (up,up) path contributes to a non-stored entry
                         * in RAP.
                         *--------------------------------------------------*/
                        hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                            P_dbox, cstart, stridec, iP,
                                            R_dbox, cstart, stridec, iR,
                                            A_dbox, fstart, stridef, iA,
                                            RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp,iPp) HYPRE_SMP_SCHEDULE
#endif
                        hypre_BoxLoop4For(iP, iR, iA, iAc)
                        {
                           /* path 1 : (stay,stay) */
                           rap_ptrS[iAc] += a_ptr[iA] ;
                           /* path 2 : (up,up) */
                           /* path 3 : (up,down) */
                           iAp = iA + COffsetA;
                           iPp = iP + AOffsetP;
                           rap_ptrS[iAc] += ra[iR] * a_ptr[iAp] * pa[iPp];
                           /* path 4 : (down,up) */
                           iAp = iA - COffsetA;
                           rap_ptrS[iAc] += rb[iR] * a_ptr[iAp] * pb[iPp];
                           /* path 5 : (down,down) */
                           iPp = iP - COffsetP + AOffsetP;
                           rap_ptrD[iAc] += rb[iR] * a_ptr[iAp] * pa[iPp];
                        }
                        hypre_BoxLoop4End(iP, iR, iA, iAc);
                     }
                     else
                     {
                        /*--------------------------------------------------
                         * If A stencil index is not (0,0,0) or RAP is
                         * nonsymmetric, all 5 paths are calculated.
                         *--------------------------------------------------*/
                        hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                            P_dbox, cstart, stridec, iP,
                                            R_dbox, cstart, stridec, iR,
                                            A_dbox, fstart, stridef, iA,
                                            RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp,iPp) HYPRE_SMP_SCHEDULE
#endif
                        hypre_BoxLoop4For(iP, iR, iA, iAc)
                        {
                           /* path 1 : (stay,stay) */
                           rap_ptrS[iAc] += a_ptr[iA] ;
                           /* path 2 : (up,up) */
                           iAp = iA + COffsetA;
                           iPp = iP + COffsetP + AOffsetP;
                           rap_ptrU[iAc] += ra[iR] * a_ptr[iAp] * pb[iPp];
                           /* path 3 : (up,down) */
                           iPp = iP + AOffsetP;
                           rap_ptrS[iAc] += ra[iR] * a_ptr[iAp] * pa[iPp];
                           /* path 4 : (down,up) */
                           iAp = iA - COffsetA;
                           rap_ptrS[iAc] += rb[iR] * a_ptr[iAp] * pb[iPp];
                           /* path 5 : (down,down) */
                           iPp = iP - COffsetP + AOffsetP;
                           rap_ptrD[iAc] += rb[iR] * a_ptr[iAp] * pa[iPp];
                        }
                        hypre_BoxLoop4End(iP, iR, iA, iAc);
                     }
                     break;
                  /*-----------------------------------------------------
                   * If A stencil index is -1 in coarsened direction, need
                   * to calculate (r,p) pairs (stay,up) (stay,down) (up,stay)
                   * and (down,stay). Paths 2 & 4 {(s,d),(d,s)} contribute
                   * to RAP with same stencil index as A. Paths 1 & 3
                   * {(s,u),(u,s)} contribute to RAP with index 0 in
                   * coarsened direction.
                   *-----------------------------------------------------*/
                  case -1:
                     rap_ptrD = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     hypre_IndexD(index,cdir) = 0;
                     AOffsetP = hypre_BoxOffsetDistance(P_dbox, index);
                     rap_ptrS = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     /*--------------------------------------------------
                      * If A stencil index is zero except in coarsened
                      * direction and RAP is symmetric, must calculate
                      * symmetric paths for (stay,up) and (up,stay).
                      * These contribute to the diagonal entry of RAP.
                      * These additional paths have the same numerical
                      * contribution as the calculated path. We multiply
                      * by two to account for them.
                      *--------------------------------------------------*/
                     symm_path_multiplier = 1;
                     diag = 0;
                     for (d = 0; d < dim; d++)
                     {
                        diag += hypre_IndexD(index,d) * hypre_IndexD(index,d);
                     }
                     if (diag == 0 && hypre_StructMatrixSymmetric(RAP))
                     {
                        symm_path_multiplier = 2;
                     }
                     hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                         P_dbox, cstart, stridec, iP,
                                         R_dbox, cstart, stridec, iR,
                                         A_dbox, fstart, stridef, iA,
                                         RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp,iPp) HYPRE_SMP_SCHEDULE
#endif
                     hypre_BoxLoop4For(iP, iR, iA, iAc)
                     {
                        /* Path 1 : (stay,up) & symmetric path */
                        iPp = iP + AOffsetP;
                        rap_ptrS[iAc] += symm_path_multiplier *
                           (a_ptr[iA] * pb[iPp]);
                        /* Path 2 : (stay,down) */
                        iPp = iP - COffsetP + AOffsetP;
                        rap_ptrD[iAc] += a_ptr[iA] * pa[iPp];
                        /* Path 3 : (up,stay) */
                        iAp = iA + COffsetA;
                        rap_ptrS[iAc] += symm_path_multiplier *
                           (ra[iR] * a_ptr[iAp] );
                        /* Path 4 : (down,stay) */
                        iAp = iA - COffsetA;
                        rap_ptrD[iAc] += rb[iR] * a_ptr[iAp] ;
                     }
                     hypre_BoxLoop4End(iP, iR, iA, iAc);
                     break;
                  /*-----------------------------------------------------
                   * If A stencil index is +1 in coarsened direction, need
                   * to calculate (r,p) pairs (stay,up) (stay,down) (up,stay)
                   * and (down,stay). Paths 1 & 3 {(s,u),(u,s)} contribute
                   * to RAP with same stencil index as A. Paths 2 & 4
                   * {(s,d),(d,s)} contribute to RAP with index 0 in
                   * coarsened direction.
                   *-----------------------------------------------------*/
                  case 1:
                     rap_ptrU = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     hypre_IndexD(index,cdir) = 0;
                     AOffsetP = hypre_BoxOffsetDistance(P_dbox, index);
                     rap_ptrS = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     /*--------------------------------------------------
                      * If A stencil index is zero except in coarsened
                      * direction and RAP is symmetric, must calculate
                      * symmetric paths for (stay,down) and (down,stay).
                      * These contribute to the diagonal entry of RAP.
                      * These additional paths have the same numerical
                      * contribution as the calculated path. We multiply
                      * by two to account for them.
                      *--------------------------------------------------*/
                     symm_path_multiplier = 1;
                     diag = 0;
                     for (d = 0; d < dim; d++)
                     {
                        diag += hypre_IndexD(index,d) * hypre_IndexD(index,d);
                     }
                     if (diag == 0 && hypre_StructMatrixSymmetric(RAP))
                     {
                        symm_path_multiplier = 2;
                     }
                     hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                         P_dbox, cstart, stridec, iP,
                                         R_dbox, cstart, stridec, iR,
                                         A_dbox, fstart, stridef, iA,
                                         RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp,iPp) HYPRE_SMP_SCHEDULE
#endif
                     hypre_BoxLoop4For(iP, iR, iA, iAc)
                     {
                        /* Path 1 : (stay,up) */
                        iPp = iP + COffsetP + AOffsetP;
                        rap_ptrU[iAc] += a_ptr[iA] * pb[iPp];
                        /* Path 2 : (stay,down) */
                        iPp = iP + AOffsetP;
                        rap_ptrS[iAc] += symm_path_multiplier *
                           (a_ptr[iA] * pa[iPp]);
                        /* Path 3 : (up,stay) */
                        iAp = iA + COffsetA;
                        rap_ptrU[iAc] += ra[iR] * a_ptr[iAp] ;
                        /* Path 4 : (down,stay) */
                        iAp = iA - COffsetA;
                        rap_ptrS[iAc] += symm_path_multiplier *
                           (rb[iR] * a_ptr[iAp] );
                     }
                     hypre_BoxLoop4End(iP, iR, iA, iAc);
                     break;
               } /* end of switch */
            } /* end of if a_ptr != NULL */
         } /* end if coarse_symm_element == -1 */
      } /* end of RAPloop */
   } /* end ForBoxI */
   /*-----------------------------------------------------------------
    * Loop over boxes to collapse entries of RAP when period = 1 in
    * the coarsened direction.
    *-----------------------------------------------------------------*/
   if (hypre_IndexD(hypre_StructGridPeriodic(cgrid),cdir) == 1)
   {
      hypre_ForBoxI(ci, cgrid_boxes)
      {
         cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
         cstart = hypre_BoxIMin(cgrid_box);
         hypre_BoxGetSize(cgrid_box, loop_size);
         RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
         /*--------------------------------------------------------------
          * Computational loop. A loop over stored entries of RAP.
          *-------------------------------------------------------------*/
         for (RAPloop = 0; RAPloop < coarse_stencil_size; RAPloop++)
         {
            if (coarse_symm_elements[RAPloop] == -1)
            {
               hypre_CopyIndex(coarse_stencil_shape[RAPloop], index);
               switch (hypre_IndexD(index, cdir))
               {
                  /*-----------------------------------------------------
                   * If RAP stencil index is 0 in coarsened direction,
                   * leave entry unchanged.
                   *-----------------------------------------------------*/
                  case 0:
                     break;
                  /*-----------------------------------------------------
                   * If RAP stencil index is +/-1 in coarsened direction,
                   * to add entry to corresponding entry with 0 in the
                   * coarsened direction. Also zero out current index.
                   *-----------------------------------------------------*/
                  default:
                     /*---------------------------------------------------------
                      * Get pointer to the current RAP index (rap_ptrD)
                      * and corresponding index with 0 in the coarsened
                      * direction (rap_ptrS).
                      *---------------------------------------------------------*/
                     rap_ptrD = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     hypre_IndexD(index,cdir) = 0;
                     rap_ptrS = hypre_StructMatrixExtractPointerByIndex(RAP,
                                                                        ci, index);
                     /*--------------------------------------------------
                      * If RAP stencil index is zero except in coarsened
                      * direction and RAP is symmetric, must
                      * double entry when modifying the diagonal.
                      *--------------------------------------------------*/
                     symm_path_multiplier = 1;
                     diag = 0;
                     for (d = 0; d < dim; d++)
                     {
                        diag += hypre_IndexD(index,d) * hypre_IndexD(index,d);
                     }
                     if (diag == 0 && hypre_StructMatrixSymmetric(RAP))
                     {
                        symm_path_multiplier = 2;
                     }
                     hypre_BoxLoop1Begin(hypre_StructMatrixDim(A), loop_size,
                                         RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
                     hypre_BoxLoop1For(iAc)
                     {
                        rap_ptrS[iAc] += symm_path_multiplier *
                           (rap_ptrD[iAc]);
                        rap_ptrD[iAc] = zero;
                     }
                     hypre_BoxLoop1End(iAc);
                     break;
               } /* end of switch */
            } /* end if coarse_symm_element == -1 */
         } /* end of RAPloop */
      } /* end ForBoxI */
   } /* if periodic */
   return hypre_error_flag;
}
|
eavlSceneRendererSimpleVR.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SCENE_RENDERER_SIMPLE_VR_H
#define EAVL_SCENE_RENDERER_SIMPLE_VR_H
#include "eavlDataSet.h"
#include "eavlCellSet.h"
#include "eavlColor.h"
#include "eavlColorTable.h"
#include "eavlSceneRenderer.h"
#include "eavlTimer.h"
// ****************************************************************************
// Class: eavlSceneRendererSimpleVR
//
// Purpose:
/// A very simple volume renderer.
//
// Programmer: Jeremy Meredith
// Creation: July 28, 2014
//
// Modifications:
//
// ****************************************************************************
class eavlSceneRendererSimpleVR : public eavlSceneRenderer
{
    int nsamples;                 // samples per view ray (set to 400 in ctor)
    vector<float> samples;        // w*h*nsamples scalar field; -1 marks "empty"
    vector<byte> rgba;            // 4*w*h composited 8-bit framebuffer
    vector<float> depth;          // w*h per-pixel depth, initialized to 1.0
    vector<eavlPoint3> p[4];      // the four corner points of each queued tet
    vector<float> values;         // four scalar values per queued tet
    double mindepth, maxdepth;    // projected depth range of the view volume
    eavlMatrix4x4 XFORM;          // world->sample-space transform (and inverse
    eavlMatrix4x4 IXFORM;         // below); usage not visible in this chunk
    eavlView lastview;            // view used for the last composite; reset to
                                  // a default eavlView to force re-compositing
    bool PartialDeterminantMode;  // enables the TetPartialDeterminants path
                                  // (presumably; consumer not in this chunk)
public:
eavlSceneRendererSimpleVR()
{
nsamples = 400;
PartialDeterminantMode = true;
}
// Nothing to release explicitly; all members clean up via their own destructors.
virtual ~eavlSceneRendererSimpleVR()
{
}
// Begin a new scene: clear all queued tetrahedra and invalidate the
// cached view so the next render re-composites from scratch.
virtual void StartScene()
{
    eavlSceneRenderer::StartScene();
    for (int corner = 0; corner < 4; ++corner)
        p[corner].clear();
    values.clear();
    lastview = eavlView();   // force re-composite
}
// No renderer-specific teardown; defer entirely to the base class.
virtual void EndScene()
{
    eavlSceneRenderer::EndScene();
}
// Composite the sampled scalar volume into the rgba and depth buffers.
// Each pixel's ray is walked back-to-front; every valid sample (value in
// [0,1]) is color-mapped and blended "over" the accumulated color using a
// gaussian opacity ramp scaled by a constant attenuation.  The depth of
// the nearest contributing sample is recorded per pixel.
void Composite()
{
    int th = eavlTimer::Start();
    //
    // composite all samples back-to-front
    //
    // Precompute a gaussian opacity ramp across the color table range.
    float *alphas = new float[ncolors];
    for (int i=0; i<ncolors; ++i)
    {
        float value = float(i)/float(ncolors-1);
        float center = 0.5;
        float sigma = 0.13;
        float alpha = exp(-(value-center)*(value-center)/(2*sigma*sigma));
        alphas[i] = alpha;
    }
    int w = view.w;
    int h = view.h;
#pragma omp parallel for collapse(2)
    for (int x=0; x<w; ++x)
    {
        for (int y=0; y<h; ++y)
        {
            eavlColor color(0,0,0,0);
            int minz = nsamples;   // nearest non-empty sample along this ray
            for (int z=nsamples-1; z>=0; --z)
            {
                int index3d = (y*view.w + x)*nsamples + z;
                float value = samples[index3d];
                if (value<0 || value>1)   // -1 marks an empty sample slot
                    continue;
                int colorindex = float(ncolors-1) * value;
                eavlColor c(colors[colorindex*3+0],
                            colors[colorindex*3+1],
                            colors[colorindex*3+2],
                            1.0);
                // use a gaussian density function as the opacity
                float attenuation = 0.02;
                float alpha = alphas[colorindex];
                alpha *= attenuation;
                // standard "over" blend of the new sample onto the ray color
                color.c[0] = color.c[0] * (1.-alpha) + c.c[0] * alpha;
                color.c[1] = color.c[1] * (1.-alpha) + c.c[1] * alpha;
                color.c[2] = color.c[2] * (1.-alpha) + c.c[2] * alpha;
                color.c[3] = color.c[3] * (1.-alpha) + c.c[3] * alpha;
                minz = z;
            }
            int index = (y*view.w + x);
            if (minz < nsamples)
            {
                // map the sample slice back to projected depth, then to [0,1]
                float projdepth = float(minz)*(maxdepth-mindepth)/float(nsamples) + mindepth;
                depth[index] = .5 * projdepth + .5;
            }
            rgba[index*4 + 0] = color.c[0]*255.;
            rgba[index*4 + 1] = color.c[1]*255.;
            rgba[index*4 + 2] = color.c[2]*255.;
            // BUG FIX: the alpha channel previously duplicated the blue
            // channel (c[2]); store the composited alpha (c[3]) instead.
            rgba[index*4 + 3] = color.c[3]*255.;
        }
    }
    delete[] alphas;
    double comptime = eavlTimer::Stop(th,"compositing");
    if (false)
        cerr << "compositing time = "<<comptime << endl;
}
// ------------------------------------------------------------------------
// Compute the barycentric coordinates of point p with respect to the
// tetrahedron (p0,p1,p2,p3) via determinant ratios (Cramer's rule).
// Each sub-matrix replaces one tet vertex with p; the ratio of its 4x4
// determinant to the full tet determinant gives one coordinate.
// Returns false for a degenerate tet (zero determinant) or when p lies
// outside the tet (any sub-determinant with sign opposite the whole);
// on success fills b0..b3.
bool TetBarycentricCoords(eavlPoint3 p0,
                          eavlPoint3 p1,
                          eavlPoint3 p2,
                          eavlPoint3 p3,
                          eavlPoint3 p,
                          float &b0, float &b1, float &b2, float &b3)
{
    eavlMatrix4x4 whole(p0.x,p0.y,p0.z, 1,
                        p1.x,p1.y,p1.z, 1,
                        p2.x,p2.y,p2.z, 1,
                        p3.x,p3.y,p3.z, 1);
    eavlMatrix4x4 sub0(p.x ,p.y ,p.z , 1,
                       p1.x,p1.y,p1.z, 1,
                       p2.x,p2.y,p2.z, 1,
                       p3.x,p3.y,p3.z, 1);
    eavlMatrix4x4 sub1(p0.x,p0.y,p0.z, 1,
                       p.x ,p.y ,p.z , 1,
                       p2.x,p2.y,p2.z, 1,
                       p3.x,p3.y,p3.z, 1);
    eavlMatrix4x4 sub2(p0.x,p0.y,p0.z, 1,
                       p1.x,p1.y,p1.z, 1,
                       p.x ,p.y ,p.z , 1,
                       p3.x,p3.y,p3.z, 1);
    eavlMatrix4x4 sub3(p0.x,p0.y,p0.z, 1,
                       p1.x,p1.y,p1.z, 1,
                       p2.x,p2.y,p2.z, 1,
                       p.x ,p.y ,p.z , 1);
    float detW = whole.Determinant();
    float det0 = sub0.Determinant();
    float det1 = sub1.Determinant();
    float det2 = sub2.Determinant();
    float det3 = sub3.Determinant();
    if (detW == 0)
    {
        // degenerate tetrahedron
        return false;
    }
    // p is inside only when every sub-determinant shares detW's sign
    // (zeros -- points on a face -- are accepted).
    if (detW < 0)
    {
        if (det0 > 0 || det1 > 0 || det2 > 0 || det3 > 0)
            return false;
    }
    else
    {
        if (det0 < 0 || det1 < 0 || det2 < 0 || det3 < 0)
            return false;
    }
    b0 = det0/detW;
    b1 = det1/detW;
    b2 = det2/detW;
    b3 = det3/detW;
    return true;
}
// For the tetrahedron (s0..s3), compute the sixteen 3x3 cofactor
// determinants (four per excluded vertex, one for each column pair of
// {x,y,z,1}) plus the full 4x4 determinant Dn of the vertex matrix.
// These are the point-independent pieces of the barycentric determinants
// in TetBarycentricCoords, so a caller can evaluate barycentric
// coordinates of many sample points without re-forming four full 4x4
// determinants per point (presumably the PartialDeterminantMode path --
// the consumer is not visible in this chunk).
void TetPartialDeterminants(eavlPoint3 s0,
                            eavlPoint3 s1,
                            eavlPoint3 s2,
                            eavlPoint3 s3,
                            float &d_yz1_123,
                            float &d_xz1_123,
                            float &d_xy1_123,
                            float &d_xyz_123,
                            float &d_yz1_023,
                            float &d_xz1_023,
                            float &d_xy1_023,
                            float &d_xyz_023,
                            float &d_yz1_013,
                            float &d_xz1_013,
                            float &d_xy1_013,
                            float &d_xyz_013,
                            float &d_yz1_012,
                            float &d_xz1_012,
                            float &d_xy1_012,
                            float &d_xyz_012,
                            float &Dn)
{
    // Widen to double for the intermediate products -- presumably to
    // limit cancellation error before rounding back to float.
    double sx0 = s0.x, sy0 = s0.y, sz0 = s0.z;
    double sx1 = s1.x, sy1 = s1.y, sz1 = s1.z;
    double sx2 = s2.x, sy2 = s2.y, sz2 = s2.z;
    double sx3 = s3.x, sy3 = s3.y, sz3 = s3.z;
    // 2x2 minors: d_yz_ij = yi*zj - yj*zi, d_y1_ij = yi - yj, etc.
    float d_yz_01 = sy0*sz1 - sy1*sz0;
    float d_yz_02 = sy0*sz2 - sy2*sz0;
    float d_yz_03 = sy0*sz3 - sy3*sz0;
    float d_yz_12 = sy1*sz2 - sy2*sz1;
    float d_yz_13 = sy1*sz3 - sy3*sz1;
    float d_yz_23 = sy2*sz3 - sy3*sz2;
    float d_y1_01 = sy0 - sy1 ;
    float d_y1_02 = sy0 - sy2 ;
    float d_y1_03 = sy0 - sy3 ;
    float d_y1_12 = sy1 - sy2 ;
    float d_y1_13 = sy1 - sy3 ;
    float d_y1_23 = sy2 - sy3 ;
    float d_z1_01 = sz0 - sz1 ;
    float d_z1_02 = sz0 - sz2 ;
    float d_z1_03 = sz0 - sz3 ;
    float d_z1_12 = sz1 - sz2 ;
    float d_z1_13 = sz1 - sz3 ;
    float d_z1_23 = sz2 - sz3 ;
    // 3x3 cofactors d_abc_ijk: determinant of columns (a,b,c) over
    // vertices (i,j,k), expanded along the first column via the 2x2
    // minors above.
    d_yz1_123 = sy1 * d_z1_23 - sy2 * d_z1_13 + sy3 * d_z1_12;
    d_xz1_123 = sx1 * d_z1_23 - sx2 * d_z1_13 + sx3 * d_z1_12;
    d_xy1_123 = sx1 * d_y1_23 - sx2 * d_y1_13 + sx3 * d_y1_12;
    d_xyz_123 = sx1 * d_yz_23 - sx2 * d_yz_13 + sx3 * d_yz_12;
    d_yz1_023 = sy0 * d_z1_23 - sy2 * d_z1_03 + sy3 * d_z1_02;
    d_xz1_023 = sx0 * d_z1_23 - sx2 * d_z1_03 + sx3 * d_z1_02;
    d_xy1_023 = sx0 * d_y1_23 - sx2 * d_y1_03 + sx3 * d_y1_02;
    d_xyz_023 = sx0 * d_yz_23 - sx2 * d_yz_03 + sx3 * d_yz_02;
    d_yz1_013 = sy0 * d_z1_13 - sy1 * d_z1_03 + sy3 * d_z1_01;
    d_xz1_013 = sx0 * d_z1_13 - sx1 * d_z1_03 + sx3 * d_z1_01;
    d_xy1_013 = sx0 * d_y1_13 - sx1 * d_y1_03 + sx3 * d_y1_01;
    d_xyz_013 = sx0 * d_yz_13 - sx1 * d_yz_03 + sx3 * d_yz_01;
    d_yz1_012 = sy0 * d_z1_12 - sy1 * d_z1_02 + sy2 * d_z1_01;
    d_xz1_012 = sx0 * d_z1_12 - sx1 * d_z1_02 + sx2 * d_z1_01;
    d_xy1_012 = sx0 * d_y1_12 - sx1 * d_y1_02 + sx2 * d_y1_01;
    d_xyz_012 = sx0 * d_yz_12 - sx1 * d_yz_02 + sx2 * d_yz_01;
    // Full 4x4 determinant, expanded along the row for vertex 0.
    Dn = sx0 * d_yz1_123 - sy0 * d_xz1_123 + sz0 * d_xy1_123 - 1 * d_xyz_123;
}
virtual void AddTetrahedronVs(double x0, double y0, double z0,
double x1, double y1, double z1,
double x2, double y2, double z2,
double x3, double y3, double z3,
double s0, double s1, double s2, double s3)
{
p[0].push_back(eavlPoint3(x0,y0,z0));
p[1].push_back(eavlPoint3(x1,y1,z1));
p[2].push_back(eavlPoint3(x2,y2,z2));
p[3].push_back(eavlPoint3(x3,y3,z3));
values.push_back(s0);
values.push_back(s1);
values.push_back(s2);
values.push_back(s3);
}
    // Rebuild the per-view buffers and transforms after the camera changes:
    // clears/reallocates the image, depth, and sample buffers, then recomputes
    // XFORM (world -> sample-grid space) and its inverse IXFORM.
    void ChangeView()
    {
        //cerr << "ChangeView\n";
        rgba.clear();
        depth.clear();
        rgba.resize(4*view.w*view.h, 0);                   // 4 bytes (RGBA) per pixel
        depth.resize(view.w*view.h, 1.0f);                 // initialized to far plane
        samples.clear();
        samples.resize(view.w * view.h * nsamples,-1.0f);  // -1 presumably marks "no sample" -- confirm in Composite()
        // Projected depth range of a slab of thickness view.size centered at
        // the look-at point (assumes view.size bounds the data -- TODO confirm)
        float dist = (view.view3d.from - view.view3d.at).norm();
        eavlPoint3 closest(0,0,-dist+view.size*.5);
        eavlPoint3 farthest(0,0,-dist-view.size*.5);
        mindepth = (view.P * closest).z;
        maxdepth = (view.P * farthest).z;
        // Compose: world -> view -> projection, then translate/scale so x,y
        // land in pixel coordinates and z in [0, nsamples)
        eavlMatrix4x4 T,S;
        T.CreateTranslate(1,1,-mindepth);
        S.CreateScale(0.5 * view.w, 0.5*view.h, nsamples/(maxdepth-mindepth));
        XFORM = S * T * view.P * view.V;
        IXFORM = XFORM;
        IXFORM.Invert();
    }
void Sample()
{
int samples_tried = 0;
int samples_eval = 0;
int tets_eval = 0;
double zdepth_sum = 0;
//cerr << "Sample\n";
int th = eavlTimer::Start();
int n = p[0].size();
#pragma omp parallel for schedule(dynamic,1)
for (int tet = 0; tet < n ; tet++)
{
// translate the tet into image space
eavlPoint3 s[4];
eavlPoint3 mine(FLT_MAX,FLT_MAX,FLT_MAX);
eavlPoint3 maxe(-FLT_MAX,-FLT_MAX,-FLT_MAX);
for (int i=0; i<4; ++i)
{
s[i] = XFORM * p[i][tet];
for (int d=0; d<3; ++d)
{
if (s[i][d] < mine[d])
mine[d] = s[i][d];
if (s[i][d] > maxe[d])
maxe[d] = s[i][d];
}
}
// discard tets outside the view
if (maxe[0] < 0)
continue;
if (maxe[1] < 0)
continue;
if (maxe[2] < 0)
continue;
if (mine[0] >= view.w)
continue;
if (mine[1] >= view.h)
continue;
if (mine[2] >= nsamples)
continue;
// clamp extents to what's inside the view
if (mine[0] < 0)
mine[0] = 0;
if (mine[1] < 0)
mine[1] = 0;
if (mine[2] < 0)
mine[2] = 0;
if (maxe[0] >= view.w)
maxe[0] = view.w-1;
if (maxe[1] >= view.h)
maxe[1] = view.h-1;
if (maxe[2] >= nsamples)
maxe[2] = nsamples-1;
int xmin = ceil(mine[0]);
int xmax = floor(maxe[0]);
int ymin = ceil(mine[1]);
int ymax = floor(maxe[1]);
int zmin = ceil(mine[2]);
int zmax = floor(maxe[2]);
// ignore tet if it doesn't intersect any sample points
if (xmin > xmax || ymin > ymax || zmin > zmax)
continue;
tets_eval++;
// we genuinely need double precision for some of these calculations, by the way:
// change these next four to float, and you see obvious artifacts.
float d_yz1_123=0, d_xz1_123=0, d_xy1_123=0, d_xyz_123=0;
float d_yz1_023=0, d_xz1_023=0, d_xy1_023=0, d_xyz_023=0;
float d_yz1_013=0, d_xz1_013=0, d_xy1_013=0, d_xyz_013=0;
float d_yz1_012=0, d_xz1_012=0, d_xy1_012=0, d_xyz_012=0;
float Dn=1, iDn=1;
if (PartialDeterminantMode)
{
TetPartialDeterminants(s[0],s[1],s[2],s[3],
d_yz1_123, d_xz1_123, d_xy1_123, d_xyz_123,
d_yz1_023, d_xz1_023, d_xy1_023, d_xyz_023,
d_yz1_013, d_xz1_013, d_xy1_013, d_xyz_013,
d_yz1_012, d_xz1_012, d_xy1_012, d_xyz_012,
Dn);
if (Dn == 0)
{
// degenerate
continue;
}
iDn = 1. / Dn;
}
zdepth_sum += 1+zmax-zmin;
// in theory, we know whether or not CLAMP_Z_EXTENTS
// is useful for every tetrahedron based on the
// z depth of this tet's bounding box. I think
// it has to be 2 or more to be helpful. we can
// make this a per-tet decision
#define CLAMP_Z_EXTENTS
#ifdef CLAMP_Z_EXTENTS
if (d_xy1_123==0 ||
d_xy1_023==0 ||
d_xy1_013==0 ||
d_xy1_012==0)
{
// degenerate tetrahedron
continue;
}
float i123 = 1. / d_xy1_123;
float i023 = 1. / d_xy1_023;
float i013 = 1. / d_xy1_013;
float i012 = 1. / d_xy1_012;
#endif
// also, don't necessarily need to pull the samples
// from memory here; might be better to do them
// later and assume they're cached if necessary
float s0 = values[tet*4+0];
float s1 = values[tet*4+1];
float s2 = values[tet*4+2];
float s3 = values[tet*4+3];
// walk over samples covering the tet in each dimension
// and sample onto our regular grid
//#pragma omp parallel for schedule(dynamic,1) collapse(2)
for(int x=xmin; x<=xmax; ++x)
{
for(int y=ymin; y<=ymax; ++y)
{
int startindex = (y*view.w + x)*nsamples;
float t0 = x * d_yz1_123 - y * d_xz1_123 - 1. * d_xyz_123;
float t1 = -x * d_yz1_023 + y * d_xz1_023 + 1. * d_xyz_023;
float t2 = x * d_yz1_013 - y * d_xz1_013 - 1. * d_xyz_013;
float t3 = -x * d_yz1_012 + y * d_xz1_012 + 1. * d_xyz_012;
// timing note:
// without updating Z extents and just using bounding box,
// we accepted only about 10-15% of samples. (makes sense,
// given the size of a tet within a bounding cube)
// noise.silo, 400 samples, sample time = .080 to 0.087 with clamping
// = .083 to 0.105 without clamping
// without omp, max 1.0 (no clamp) drops to max 0.75 (clamp)
// in other words, CLAMP_Z_EXTENTS is a factor of 20-25% faster on noise, best case
// but on rect_cube, it's a factor of 270% faster (2.7x) on rect_cube!
// on noise_256, it's a small slowdown, 7%. (i think we're doing more divisions)
// maxes sense; once we're about 1 sample per tet, the extra divisions we need to do
// are only used about once, so it's better to just try out the samples
#ifdef CLAMP_Z_EXTENTS
float newzmin = zmin;
float newzmax = zmax;
float z0 = -t0 * i123;
float z1 = +t1 * i023;
float z2 = -t2 * i013;
float z3 = +t3 * i012;
if (-i123 < 0) { newzmin = std::max(newzmin,z0); } else { newzmax = std::min(newzmax,z0); }
if (+i023 < 0) { newzmin = std::max(newzmin,z1); } else { newzmax = std::min(newzmax,z1); }
if (-i013 < 0) { newzmin = std::max(newzmin,z2); } else { newzmax = std::min(newzmax,z2); }
if (+i012 < 0) { newzmin = std::max(newzmin,z3); } else { newzmax = std::min(newzmax,z3); }
newzmin = ceil(newzmin);
newzmax = floor(newzmax);
for(int z=newzmin; z<=newzmax; ++z)
#else
for(int z=zmin; z<=zmax; ++z)
#endif
{
samples_tried++;
float value;
if (!PartialDeterminantMode)
{
// Mode where we calculate the full barycentric
// coordinates from scratch each time.
float b0,b1,b2,b3;
bool isInside =
TetBarycentricCoords(s[0],s[1],s[2],s[3],
eavlPoint3(x,y,z),b0,b1,b2,b3);
if (!isInside)
continue;
value = b0*s0 + b1*s1 + b2*s2 + b3*s3;
}
else
{
// Mode where we pre-calculate partial determinants
// to avoid a bunch of redundant arithmetic.
float D0 = t0 + z * d_xy1_123;
float D1 = t1 - z * d_xy1_023;
float D2 = t2 + z * d_xy1_013;
float D3 = t3 - z * d_xy1_012;
// explicit calculation, without precalculating the constant and x/y terms
//float D0 = x * d_yz1_123 - y * d_xz1_123 + z * d_xy1_123 - 1. * d_xyz_123;
//float D1 = -x * d_yz1_023 + y * d_xz1_023 - z * d_xy1_023 + 1. * d_xyz_023;
//float D2 = x * d_yz1_013 - y * d_xz1_013 + z * d_xy1_013 - 1. * d_xyz_013;
//float D3 = -x * d_yz1_012 + y * d_xz1_012 - z * d_xy1_012 + 1. * d_xyz_012;
#ifndef CLAMP_Z_EXTENTS
// if we already clamped the Z extents, we know every sample
// is already inside the tetrahedron!
if (Dn<0)
{
// should NEVER fire unless there's a numerical precision error
//cerr << "Dn negative\n";
if (D0>0 || D1>0 || D2>0 || D3>0)
continue;
}
else
{
//cerr << "Dn positive\n";
if (D0<0 || D1<0 || D2<0 || D3<0)
continue;
}
#endif
value = (D0*s0 + D1*s1 + D2*s2 + D3*s3) * iDn;
}
int index3d = startindex + z;
samples[index3d] = value;
samples_eval++;
}
}
}
}
double sampletime = eavlTimer::Stop(th,"sample");
if (false)
{
// NOTE: These values should be ignored if OpenMP was enabled above:
cerr << zdepth_sum/double(n) << " average z samples per tet\n";
cerr << samples_eval << " out of " << samples_tried << " ("
<< (100.*double(samples_eval)/double(samples_tried)) << "%) samples\n";
cerr << tets_eval << " out of " << n << " ("
<< (100.*double(tets_eval)/double(n)) << "%) tetrahedra\n";
cerr << "w="<<view.w<<" h="<<view.h << endl;
cerr << "Sample time = "<<sampletime << endl;
}
}
// ------------------------------------------------------------------------
    // Intentionally empty: this renderer only consumes tetrahedra
    // (see AddTetrahedronVs); surface triangles are ignored.
    virtual void AddTriangleVnVs(double, double, double,
                                 double, double, double,
                                 double, double, double,
                                 double, double, double,
                                 double, double, double,
                                 double, double, double,
                                 double, double, double)
    {
    }
// ------------------------------------------------------------------------
    // Intentionally empty: point primitives are ignored by this renderer.
    virtual void AddPointVs(double, double, double, double, double)
    {
    }
// ------------------------------------------------------------------------
    // Intentionally empty: line primitives are ignored by this renderer.
    virtual void AddLineVs(double, double, double,
                           double, double, double,
                           double, double)
    {
    }
// ------------------------------------------------------------------------
virtual void Render()
{
if (lastview != view)
{
ChangeView();
Sample();
lastview = view;
}
Composite();
}
virtual unsigned char *GetRGBAPixels()
{
return &rgba[0];
}
virtual float *GetDepthPixels()
{
return &depth[0];
}
};
#endif
|
GB_binop__times_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int32)
// A*D function (colscale): GB (_AxD__times_int32)
// D*A function (rowscale): GB (_DxB__times_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int32)
// C=scalar+B GB (_bind1st__times_int32)
// C=scalar+B' GB (_bind1st_tran__times_int32)
// C=A+scalar GB (_bind2nd__times_int32)
// C=A'+scalar GB (_bind2nd_tran__times_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (trailing line-continuation backslashes removed: they spliced the comment
// line that follows each macro into the macro body)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT32 || GxB_NO_TIMES_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Generated wrapper: all work happens in the included template, specialized
// by the GB_* macros above for the TIMES int32 operator.
void GB (_Cdense_ewise3_accum__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated wrapper: all work happens in the included template, specialized
// by the GB_* macros above for the TIMES int32 operator.
void GB (_Cdense_ewise3_noaccum__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated wrapper; returns GrB_NO_VALUE when this operator/type combination
// is compiled out (GB_DISABLE), so the caller falls back to the generic path.
GrB_Info GB (_Cdense_accumB__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated wrapper; returns GrB_NO_VALUE when this operator/type combination
// is compiled out (GB_DISABLE), so the caller falls back to the generic path.
GrB_Info GB (_Cdense_accumb__times_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;  // unreachable; matches the generator's output
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D (column scale by diagonal D); the template fills Cx using GB_BINOP.
GrB_Info GB (_AxD__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B (row scale by diagonal D); the template fills Cx using GB_BINOP.
GrB_Info GB (_DxB__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion dispatcher for TIMES int32.  The ek-slicing
// workspaces are declared here and freed via GB_FREE_WORKSPACE after the
// template runs.
GrB_Info GB (_AaddB__times_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are left unset unless is_eWiseUnion; presumably the template
    // only reads them in that case -- confirm in GB_add_template.c
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C sparse/hyper; all work in the included meta file.
GrB_Info GB (_AemultB_08__times_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP is 0
// for TIMES (commutative), so only the unflipped template branch is compiled.
GrB_Info GB (_AemultB_02__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap C; all work in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__times_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for every entry present in B's bitmap.
GrB_Info GB (_bind1st__times_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // entries absent from the bitmap are left untouched
        if (GBB (Bb, p))
        { 
            int32_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x * bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__times_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        // entries absent from the bitmap are left untouched
        if (GBB (Ab, p))
        { 
            int32_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply x*aij to each entry.
GrB_Info GB (_bind1st_tran__times_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply aij*y to each entry.
GrB_Info GB (_bind2nd_tran__times_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bucket.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUCKET_H_
#define BUCKET_H_
#include <algorithm>
#include <cinttypes>
#include <iterator>
#include <vector>
/*
GAP Benchmark Suite
Class: Bucket
Author: Scott Beamer
Parallel container designed for constant time appends
- Threads should fill thread-local std::vector first
- When done with vector, call swap_vector_in(vector)
- Once started reading with iterator, should not modify or append anymore
- Like other iterators, comparing iterators for different objects is undefined
- Internally, built as a vector of vectors but appears contiguous by iterators
*/
template <typename T_>
class Bucket {
 public:
  // Total number of elements across all chunks.
  size_t size() const {
    return num_elements_;
  }

  void clear() {
    chunks_.clear();
    num_elements_ = 0;
  }

  bool empty() const {
    return size() == 0;
  }

  // Not thread-safe; parallel producers should use swap_vector_in instead.
  void push_back(T_ to_add) {
    if (chunks_.empty())
      chunks_.emplace_back();
    chunks_.back().push_back(to_add);
    num_elements_++;
  }

  // Donate a thread-local vector's contents as a new chunk (constant time,
  // serialized by the critical section).  v is left empty.
  void swap_vector_in(std::vector<T_> &v) {
    if (!v.empty()) {
      #pragma omp critical
      {
        num_elements_ += v.size();
        chunks_.emplace_back();
        chunks_.back().swap(v);
      }
    }
  }

  void swap(Bucket<T_> &other) {
    chunks_.swap(other.chunks_);
    std::swap(num_elements_, other.num_elements_);
  }

  // Doesn't define every operator, but more than enough for OpenMP
  class iterator {
   public:
    // std::iterator is deprecated in C++17; declare the traits directly.
    using iterator_category = std::random_access_iterator_tag;
    using value_type = T_;
    using difference_type = std::ptrdiff_t;
    using pointer = T_*;
    using reference = T_&;

    iterator(size_t index, size_t offset, std::vector<std::vector<T_>> &chunks)
        : chunk_index_(index), chunk_offset_(offset), chunks_ref_(chunks) {}

    T_ operator*() const {
      return chunks_ref_[chunk_index_][chunk_offset_];
    }

    T_& operator*() {
      return chunks_ref_[chunk_index_][chunk_offset_];
    }

    const iterator &operator++() {
      advance_one();
      return *this;
    }

    iterator operator++(int) {
      iterator copy(*this);
      advance_one();
      return copy;
    }

    iterator & operator +=(int64_t to_add) {
      if (to_add >= 0) {
        // Walk forward chunk by chunk until the offset lands inside one.
        while ((to_add != 0) && (chunk_index_ < chunks_ref_.size())) {
          chunk_offset_ += to_add;
          if (chunk_offset_ >= chunks_ref_[chunk_index_].size()) {
            to_add = chunk_offset_ - chunks_ref_[chunk_index_].size();
            chunk_offset_ = 0;
            chunk_index_++;
          } else {
            to_add = 0;  // success
          }
        }
      } else {
        // Backward move.  chunk_offset_ is unsigned, so the original
        // "chunk_offset_ < 0" test could never fire (and its chunk rollover
        // was off by one); do the arithmetic in a signed temporary instead.
        int64_t pos = static_cast<int64_t>(chunk_offset_) + to_add;
        while ((pos < 0) && (chunk_index_ > 0)) {
          chunk_index_--;
          pos += static_cast<int64_t>(chunks_ref_[chunk_index_].size());
        }
        chunk_offset_ = static_cast<size_t>(pos);
      }
      return *this;
    }

    // Distance between two iterators over the same Bucket.
    int64_t operator -(const iterator &other) const {
      if (chunk_index_ == other.chunk_index_)
        return chunk_offset_ - other.chunk_offset_;
      size_t op_index = chunk_index_;
      int64_t total = 0;
      if (op_index > other.chunk_index_) {
        total += chunk_offset_;
        while (op_index > other.chunk_index_) {
          op_index--;
          total += chunks_ref_[op_index].size();
        }
        return total - other.chunk_offset_;
      } else {
        total += chunks_ref_[op_index].size() - chunk_offset_;
        op_index++;
        while (op_index < other.chunk_index_) {
          total += chunks_ref_[op_index].size();
          op_index++;
        }
        return total + other.chunk_offset_;
      }
    }

    bool operator <(const iterator &other) const {
      if (chunk_index_ == other.chunk_index_)
        return chunk_offset_ < other.chunk_offset_;
      else
        return chunk_index_ < other.chunk_index_;
    }

    bool operator==(const iterator &other) const {
      // Compare the identity of the backing container by address; the
      // original compared vector contents, an O(n) deep comparison.
      return (chunk_index_ == other.chunk_index_) &&
             (chunk_offset_ == other.chunk_offset_) &&
             (&chunks_ref_ == &other.chunks_ref_);
    }

    bool operator!=(const iterator &other) const {
      return !(operator==(other));
    }

   private:
    // Step forward one element, rolling into the next chunk at a chunk end.
    void advance_one() {
      chunk_offset_++;
      if (chunk_offset_ == chunks_ref_[chunk_index_].size()) {
        chunk_offset_ = 0;
        chunk_index_++;
      }
    }

    size_t chunk_index_;
    size_t chunk_offset_;
    std::vector<std::vector<T_>> &chunks_ref_;
  };

  iterator begin() { return iterator(0, 0, chunks_); }
  iterator end()   { return iterator(chunks_.size(), 0, chunks_); }

 private:
  std::vector<std::vector<T_>> chunks_;
  size_t num_elements_ = 0;
};
#endif // BUCKET_H_
|
DRB060-matrixmultiply-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic i-k-j matrix multiplication
*/
#define N 100
#define M 100
#define K 100
double a[N][M],b[M][K],c[N][K];
/* Initialize a, b, c.  Each nesting level carries its own "parallel for";
   the inner pragmas request nested parallel regions, which are serialized
   unless OMP nested parallelism is enabled.
   NOTE(review): a[i][k] and b[k][j] are rewritten for every j with
   j-dependent values (last j wins when serial), and b[k][j] is written by
   every iteration of the parallel i loop -- that looks like a write-write
   race on b; confirm against the benchmark's race-free labeling. */
int init()
{
  int i,j,k;
#pragma omp parallel for private(i ,j ,k )
  for (i = 0; i < N; i++)
#pragma omp parallel for private(j ,k )
    for (k = 0; k < K; k++)
#pragma omp parallel for private(j )
      for (j = 0; j < M; j++) {
        c[i][j] = i * j;
        a[i][k] = i * j;
        b[k][j] = i * j;
      }
  return 0;
}
/* Classic i-k-j matrix multiply, c += a*b.  The outer i loop partitions the
   rows of c disjointly across threads.  The inner pragmas create nested
   parallel regions, serialized by default; if nesting were enabled, the
   parallel k loop would race on c[i][j] -- presumably the benchmark relies
   on the default (nesting off). */
int mmm()
{
  int i,j,k;
#pragma omp parallel for private(i ,j ,k )
  for (i = 0; i < N; i++)
#pragma omp parallel for private(j ,k )
    for (k = 0; k < K; k++)
#pragma omp parallel for private(j )
      for (j = 0; j < M; j++)
        c[i][j]= c[i][j]+a[i][k]*b[k][j];
  return 0;
}
/* Print every c/a/b triple, serially.
   NOTE(review): printf is used but no #include <stdio.h> is visible in this
   chunk; if it is not included elsewhere this relies on an implicit
   declaration (invalid in C99+) -- confirm. */
int print()
{
  int i,j,k;
  for (i = 0; i < N; i++)
    for (k = 0; k < K; k++)
      for (j = 0; j < M; j++)
        printf("%lf %lf %lf\n", c[i][j],a[i][k],b[k][j]);
  return 0;
}
/* Benchmark driver: initialize, multiply, then dump the results. */
int main()
{
  init();
  mmm();
  print();
  return 0;
}
|
helper.h |
// =================================================================================================
// This file is part of the CodeVault project. The project is licensed under Apache Version 2.0.
// CodeVault is part of the EU-project PRACE-4IP (WP7.3.C).
//
// Author(s):
// Valeriu Codreanu <valeriu.codreanu@surfsara.nl>
//
// This file contains helper functions for the Dense Linear Algebra examples.
// =================================================================================================
#include <assert.h>
#include <math.h>
#ifdef __OPENMP
#include <omp.h>
#endif
#include <sys/time.h>
#include <ctime>
/* Remove if already defined */
typedef long long int64; typedef unsigned long long uint64;

/* Returns the amount of milliseconds elapsed since the UNIX epoch. Works on both
 * windows and linux. */
uint64 get_time_uint64()
{
    /* Linux */
    struct timeval tv;
    gettimeofday(&tv, NULL);
    uint64 ret = tv.tv_usec;
    /* Convert from micro seconds (10^-6) to milliseconds (10^-3) */
    ret /= 1000;
    /* Adds the seconds (10^0) after converting them to milliseconds (10^-3).
     * Widen to uint64 BEFORE scaling: tv_sec * 1000 overflows when time_t is
     * a 32-bit type (epoch seconds * 1000 exceeds 2^31). */
    ret += ((uint64) tv.tv_sec * 1000);
    return ret;
}
// Compares data against reference using the relative L2 norm of the
// difference.  Returns true when the relative error is below epsilon;
// returns false when the reference norm is (near) zero, since the relative
// error is then undefined.
inline bool
compareReference(const float *reference, const float *data,
                 const unsigned int len, const float epsilon)
{
    assert(epsilon >= 0);
    float error = 0;
    float ref = 0;
    // Fix: the standard OpenMP feature-test macro is _OPENMP; the original
    // tested __OPENMP, which is never defined, so the pragma was silently
    // dropped.  A reduction is also required -- error and ref are shared
    // accumulators and the bare parallel-for would race on them.
    #ifdef _OPENMP
    #pragma omp parallel for reduction(+:error,ref)
    #endif
    for (unsigned int i = 0; i < len; ++i)
    {
        float diff = reference[i] - data[i];
        error += diff * diff;
        ref += reference[i] * reference[i];
    }
    float norm_ref = sqrtf(ref);
    if (fabs(ref) < 1e-7)
    {
#ifdef _DEBUG
        // NOTE(review): std::cerr is used but no <iostream> include is
        // visible in this chunk -- confirm it is included where _DEBUG builds.
        std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
        return false;
    }
    float norm_error = sqrtf(error);
    error = norm_error / norm_ref;
    bool result = error < epsilon;
#ifdef _DEBUG
    if (! result)
    {
        std::cerr << "ERROR, l2-norm error "
                  << error << " is greater than epsilon " << epsilon << "\n";
    }
#endif
    return result;
}
|
GB_unop__sin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fc64_fc64)
// op(A') function: GB (_unop_tran__sin_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csin (aij)
// Type and operator macros consumed by the apply kernel below and by the
// included transpose template (GB_unop_transpose.c).
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = csin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = csin (Ax [p]) for every entry p of A; handles both the full case
// (Ab == NULL) and the bitmap case (Ab marks which entries are present).
GrB_Info GB (_unop_apply__sin_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            GxB_FC64_t zk = Ax [k] ;
            Cx [k] = csin (zk) ;
        }
    }
    else
    {
        // full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC64_t zk = Ax [k] ;
            Cx [k] = csin (zk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sin_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The entire kernel body comes from the shared transpose template, which
// expands the GB_* macros defined above (GB_GETA, GB_CAST, GB_OP).
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
branch.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: Branch
PURPOSE: This program tests the effect of inner-loop branches on
application performance. We investigate four cases. The first
three all concern light-weight loops, i.e. loops that have
very few instructions associated with them.
1) branches inside vectorizable loops where the branch does
not necessarily inhibit vectorization: vector_go
2) branches inside vectorizable loops where the branch does
inhibit vectorization: vector_stop
3) branches inside non-vectorizable loops: no_vector
4) branches inside non-vectorizable loops in which each branch
corresponds to a sizeable and different set of instructions:
ins-heavy
CONSTRAINTS:
- the code should be continuously scalable, i.e. the user should
be able to specify the amount of work to be done.
- the code should be verifiable.
- the code should be executable with and without branches, with
otherwise identical amounts of work, to assess the impact of the
branches.
- the performance of the code should be dominated by the work in
the loops, not by memory bandwidth required to fetch data. This
means that arrays should fit in cache, and any loop over arrays
should be executed many times to amortize the initial memory load
costs and to remove noise from the timings.
- any arrays used should be initialized only once, to avoid confusing
performance impact of initialization with that of the branches.
Because the base loop over the array is short, it completes very
quickly, leading to very noisy results if it were timed separately.
Hence, we must time the ensemble of all iterations over the base
loop, which would include reinitializations if present.
- the branches should be "unpredictable," meaning that if the compiler
guesses them to be always taken or to be always not taken, it will
be wrong often. Otherwise the cost of a mispredicted branch may
not show up in the performance results.
- the amount of work in the codes containing the three different
types of light-weight loops should be the same to allow fair
comparisons.
- the code should not produce overflow or underflow.
- the actual cost of computing the branch condition should be small,
so that we can assess the cost of the occurrence of the branch as
it disrupts vectorization and the hardware pipelines. If the
condition were expensive to compute and we run the code with and
without the branch, the performance difference would be exaggerated.
- Note: Casts from integer to float or double are not always vectorizable.
APPROACH:
- to avoid casts and keep conditionals inexpensive and exact, we use
only integer operations.
- we make sure that the numerical results of the codes for the
different branch structures and for the different paths following
the branch are identical.
- conditionals are simple comparisons to zero of quantities that
are computed anyway.
- initialization produces a saw-tooth pattern with frequent sign
crossings to defeat speculative branch execution.
- successive iterations over a relatively short array result simply
in a change of sign of all array elements, so that the results are
bounded, and verification values are easily computable.
USAGE: The program takes as input the number of threads, the length of the
vector loop, the number of repetitions of the loop, and the type of
branching
<progname> <# threads> <# iterations> <vector length> <branch_type>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
fill_vec()
func*()
HISTORY: Written by Rob Van der Wijngaart, May 2006.
**********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* the following values are only used as labels for the four branch variants;
   their numeric values are arbitrary */
#define VECTOR_STOP 66
#define VECTOR_GO 77
#define NO_VECTOR 88
#define INS_HEAVY 99
#define WITH_BRANCHES 1
#define WITHOUT_BRANCHES 0
/* defined in a separate file: fills "vector" with the INS_HEAVY workload and
   reports the number of matrix functions and the matrix rank it used */
extern int fill_vec(int *vector, int vector_length, int iterations, int branch,
int *nfunc, int *rank);
/* Driver: times each loop variant twice -- once WITH branches and once doing
   the same arithmetic WITHOUT branches -- across OpenMP threads, then checks
   the final array contents against a closed-form reference sum. */
int main(int argc, char ** argv)
{
int my_ID; /* Thread ID */
int vector_length; /* length of vector loop containing the branch */
int nfunc; /* number of functions used in INS_HEAVY option */
int rank; /* matrix rank used in INS_HEAVY option */
double branch_time, /* timing parameters */
no_branch_time;
double ops; /* double precision representation of integer ops */
int iterations; /* number of times the branch loop is carried out */
int i, iter, aux; /* dummies */
char *branch_type; /* string defining branching type */
int btype; /* integer encoding branching type */
int total=0,
total_ref; /* computed and stored verification values */
int nthread_input; /* thread parameters */
int nthread;
int num_error=0; /* flag that signals that requested and obtained
numbers of threads are the same */
/**********************************************************************************
** process and test input parameters
**********************************************************************************/
if (argc != 5){
printf("Usage: %s <# threads> <# iterations> <vector length>", *argv);
printf("<branching type>\n");
printf("branching type: vector_go, vector_stop, no_vector, ins_heavy\n");
exit(EXIT_FAILURE);
}
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
iterations = atoi(*++argv);
/* iterations must be even: the loop pairs below rely on a second pass that
   undoes the sign flips of the first, keeping values bounded */
if (iterations < 1 || iterations%2==1){
printf("ERROR: Iterations must be positive and even : %d \n", iterations);
exit(EXIT_FAILURE);
}
vector_length = atoi(*++argv);
if (vector_length < 1){
printf("ERROR: loop length must be >= 1 : %d \n",vector_length);
exit(EXIT_FAILURE);
}
branch_type = *++argv;
if (!strcmp(branch_type,"vector_stop")) btype = VECTOR_STOP;
else if (!strcmp(branch_type,"vector_go" )) btype = VECTOR_GO;
else if (!strcmp(branch_type,"no_vector" )) btype = NO_VECTOR;
else if (!strcmp(branch_type,"ins_heavy" )) btype = INS_HEAVY;
else {
printf("Wrong branch type: %s; choose vector_stop, vector_go, ", branch_type);
printf("no_vector, or ins_heavy\n");
exit(EXIT_FAILURE);
}
/* each thread owns a private vector+index pair; "total" is summed across
   threads via the reduction clause */
#pragma omp parallel private(i, my_ID, iter, aux, nfunc, rank) reduction(+:total)
{
int * RESTRICT vector; int * RESTRICT index;
int factor = -1; /* NOTE(review): unused -- candidate for removal */
#pragma omp master
{
nthread = omp_get_num_threads();
printf("OpenMP Branching Bonanza\n");
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %d\n", nthread_input);
printf("Vector length = %d\n", vector_length);
printf("Number of iterations = %d\n", iterations);
printf("Branching type = %s\n", branch_type);
}
}
bail_out(num_error);
my_ID = omp_get_thread_num();
/* one allocation holds both halves: data in [0,n), index in [n,2n) */
vector = malloc(vector_length*2*sizeof(int));
if (!vector) {
printf("ERROR: Thread %d failed to allocate space for vector\n", my_ID);
num_error = 1;
}
bail_out(num_error);
/* grab the second half of vector to store index array */
index = vector + vector_length;
/* initialize the array with entries with varying signs; array "index" is only
used to obfuscate the compiler (i.e. it won't vectorize a loop containing
indirect referencing). It functions as the identity operator. */
for (i=0; i<vector_length; i++) {
vector[i] = 3 - (i&7);
index[i] = i;
}
#pragma omp barrier
#pragma omp master
{
branch_time = wtime();
}
/* timed section 1: the loops WITH branches */
switch (btype) {
case VECTOR_STOP:
/* condition vector[index[i]]>0 inhibits vectorization */
for (iter=0; iter<iterations; iter+=2) {
/* "#pragma vector always" is an Intel-compiler hint; others ignore it */
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = -(3 - (i&7));
if (vector[index[i]]>0) vector[i] -= 2*vector[i];
else vector[i] -= 2*aux;
}
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = (3 - (i&7));
if (vector[index[i]]>0) vector[i] -= 2*vector[i];
else vector[i] -= 2*aux;
}
}
break;
case VECTOR_GO:
/* condition aux>0 allows vectorization */
for (iter=0; iter<iterations; iter+=2) {
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = -(3 - (i&7));
if (aux>0) vector[i] -= 2*vector[i];
else vector[i] -= 2*aux;
}
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = (3 - (i&7));
if (aux>0) vector[i] -= 2*vector[i];
else vector[i] -= 2*aux;
}
}
break;
case NO_VECTOR:
/* condition aux>0 allows vectorization, but indirect indexing inhibits it */
for (iter=0; iter<iterations; iter+=2) {
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = -(3 - (i&7));
if (aux>0) vector[i] -= 2*vector[index[i]];
else vector[i] -= 2*aux;
}
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = (3 - (i&7));
if (aux>0) vector[i] -= 2*vector[index[i]];
else vector[i] -= 2*aux;
}
}
break;
case INS_HEAVY:
fill_vec(vector, vector_length, iterations, WITH_BRANCHES, &nfunc, &rank);
}
#pragma omp master
{
branch_time = wtime() - branch_time;
if (btype == INS_HEAVY) {
printf("Number of matrix functions = %d\n", nfunc);
printf("Matrix order = %d\n", rank);
}
}
/* do the whole thing once more, but now without branches */
#pragma omp barrier
#pragma omp master
{
no_branch_time = wtime();
}
/* timed section 2: the same arithmetic, but without branches */
switch (btype) {
case VECTOR_STOP:
case VECTOR_GO:
for (iter=0; iter<iterations; iter+=2) {
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = -(3-(i&7));
vector[i] -= (vector[i] + aux);
}
for (i=0; i<vector_length; i++) {
aux = (3-(i&7));
vector[i] -= (vector[i] + aux);
}
}
break;
case NO_VECTOR:
for (iter=0; iter<iterations; iter+=2) {
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = -(3-(i&7));
vector[i] -= (vector[index[i]]+aux);
}
#pragma vector always
for (i=0; i<vector_length; i++) {
aux = (3-(i&7));
vector[i] -= (vector[index[i]]+aux);
}
}
break;
case INS_HEAVY:
fill_vec(vector, vector_length, iterations, WITHOUT_BRANCHES, &nfunc, &rank);
}
#pragma omp master
{
no_branch_time = wtime() - no_branch_time;
ops = (double)vector_length * (double)iterations * (double)nthread;
if (btype == INS_HEAVY) ops *= rank*(rank*19 + 6);
else ops *= 4;
}
/* each thread contributes its private array sum to the reduction */
for (total = 0, i=0; i<vector_length; i++) total += vector[i];
} /* end of OPENMP parallel region */
/* compute verification values: closed-form sum of the final saw-tooth array
   per thread, times the number of threads */
total_ref = ((vector_length%8)*(vector_length%8-8) + vector_length)/2*nthread;
if (total == total_ref) {
printf("Solution validates\n");
printf("Rate (Mops/s) with branches: %lf time (s): %lf\n",
ops/(branch_time*1.e6), branch_time);
printf("Rate (Mops/s) without branches: %lf time (s): %lf\n",
ops/(no_branch_time*1.e6), no_branch_time);
#ifdef VERBOSE
printf("Array sum = %d, reference value = %d\n", total, total_ref);
#endif
}
else {
printf("ERROR: array sum = %d, reference value = %d\n", total, total_ref);
}
exit(EXIT_SUCCESS);
}
|
Vector.h | #pragma once
#include <debug.h>
#include <random>
#include <vector>
namespace freeaml
{
/**
* @brief @c Vector<T> is an extension of @c std::vector<T> for mathematical
* applications.
*
* This class stores a sequence of elements of type @c T. It overloads the
* addition (+), subtraction (-), multiplication (*) and division (/) operators
* for supporting common vector operations such as vector addition, vector
* multiplication by scalar and vector dot product.
*
* Functions for computing @a L<sup>p</sup> norms as well as other commonly used
* mathematical operations are also provided in the class.
*
* Support for OpenMP was added to the functions and operators which showed a
* significant speedup when implemented using multiple threads.
*/
template<typename T>
class Vector : public std::vector<T>
{
public:
using BaseVector = std::vector<T>;
using size_type = typename BaseVector::size_type;
/** @brief Constructs a vector with no elements. */
Vector();
/**
* @brief Constructs a vector with the contents of an initializer list.
* @param init An initializer list holding elements of type @c T.
*/
Vector(std::initializer_list<T> init);
/**
* @brief Constructs a vector with default-initialized elements.
* @param n The length of the vector.
*/
explicit Vector(size_type n);
/**
* @brief Constructs a vector with all elements initialized with a value.
* @param n The length of the vector.
* @param x The initializing value for every element of the vector.
*/
Vector(size_type n, const T& x);
/**
* @brief Constructs a vector with the contents of a range.
* @param first An iterator pointing to the first range element.
* @param last An iterator pointing to one-past-the-last range element.
*/
template<class InputIterator>
Vector(InputIterator first, InputIterator last);
/**
* @brief Copy constructor.
* @param v The vector from which all elements will be copied.
*/
Vector(const Vector& v) = default;
/**
* @brief Move constructor.
* @param v The vector from which all elements will be moved.
*/
Vector(Vector&& v) = default;
/** @brief Destructor. */
~Vector() = default;
/**
* @brief Copy-assignment operator.
* @param v The vector from which all elements will be copied.
* @return A reference to @c *this.
*/
Vector& operator=(const Vector& v) = default;
/**
* @brief Move-assignment operator.
* @param v The vector from which all elements will be moved.
* @return A reference to @c *this.
*/
Vector& operator=(Vector&& v) = default;
/**
* @brief Multiplies all elements of the vector by a scalar.
* @param c A scalar.
* @return A reference to @c *this.
*/
Vector& operator*=(const T& c);
/**
* @brief Divides all elements of the vector by a scalar.
* @param c A scalar.
* @return A reference to @c *this.
*/
Vector& operator/=(const T& c);
/**
* @brief Performs element-wise addition-assignment with another vector.
* @param v A vector.
* @return A reference to @c *this.
*/
Vector& operator+=(const Vector& v);
/**
* @brief Performs element-wise subtraction-assignment with another vector.
* @param v A vector.
* @return A reference to @c *this.
*/
Vector& operator-=(const Vector& v);
/**
* @brief Computes the @a L<sup>1</sup>-norm of the vector.
* @return The @a L<sup>1</sup>-norm of the vector.
*/
T l1_norm() const;
/**
* @brief Computes the @a L<sup>2</sup>-norm of the vector.
* @return The @a L<sup>2</sup>-norm of the vector.
*/
T l2_norm() const;
/**
* @brief Computes the @a L<sup>p</sup>-norm of the vector.
* @param p A scalar defining the norm to compute.
* @return The @a L<sup>p</sup>-norm of the vector.
*/
T lp_norm(const T& p) const;
/**
* @brief Computes the @a L<sup>∞</sup>-norm of the vector.
* @return The @a L<sup>∞</sup>-norm of the vector.
*/
T linf_norm() const;
/**
* @brief Computes the sum of all elements of the vector.
* @return The sum of all elements of the vector.
*/
T sum() const;
/**
* @brief Computes the arithmetic mean of the elements of the vector.
* @return The arithmetic mean of the elements of the vector.
* @note Returns a default-constructed @c T for an empty vector.
*/
T mean() const;
}; /* class Vector<T> */
/**
* @brief Computes the multiplication of a vector by a scalar on the right.
* @param v A vector.
* @param c A scalar.
* @return A copy of @c v with all elements multiplied by @c c.
*/
template<typename T>
Vector<T> operator*(const Vector<T>& v, const T& c);
/**
* @brief Computes the multiplication of a vector by a scalar on the left.
* @param c A scalar.
* @param v A vector.
* @return A copy of @c v with all elements multiplied by @c c.
*/
template<typename T>
Vector<T> operator*(const T& c, const Vector<T>& v);
/**
* @brief Computes the dot product of two equally-sized vectors.
* @param v1 A vector.
* @param v2 A vector.
* @return The dot product of @c v1 and @c v2.
* @note Both vectors must have the same size (asserted in debug builds).
*/
template<typename T>
T operator*(const Vector<T>& v1, const Vector<T>& v2);
/**
* @brief Computes the division of a vector by a scalar.
* @param v A vector.
* @param c A scalar.
* @return A copy of @c v with all elements divided by @c c.
*/
template<typename T>
Vector<T> operator/(const Vector<T>& v, const T& c);
/**
* @brief Computes the vectorial addition of two equally-sized vectors.
* @param v1 A vector.
* @param v2 A vector.
* @return The element-wise sum of @c v1 and @c v2.
*/
template<typename T>
Vector<T> operator+(const Vector<T>& v1, const Vector<T>& v2);
/**
* @brief Computes the vectorial difference of two equally-sized vectors.
* @param v1 A vector.
* @param v2 A vector.
* @return The element-wise difference between @c v1 and @c v2.
*/
template<typename T>
Vector<T> operator-(const Vector<T>& v1, const Vector<T>& v2);
/**
* @brief Computes the element-wise negation of a vector.
* @param v A vector.
* @return The element-wise negation of @c v.
*/
template<typename T>
Vector<T> operator-(const Vector<T>& v);
/**
* @brief Prints the elements of a vector to an output stream.
* @param stream An output stream.
* @param v A vector.
* @return A reference to @c stream.
*/
template<typename T>
std::ostream& operator<<(std::ostream& stream, const Vector<T>& v);
/**
* @brief Generates a random vector with elements within a specified range.
* @param n The size of the vector to generate.
* @param lower_bound The lower bound for the sample interval.
* @param upper_bound The upper bound for the sample interval.
* @return A vector with @c n elements sampled uniformly from
* <tt>[lower_bound, upper_bound]</tt>.
* @note This function was designed to work only with primitive integer and
* floating-point types (e.g. @c int, @c float, @c double etc.).
*/
template<typename T>
Vector<T> random_vector(typename Vector<T>::size_type n,
const T& lower_bound = T{0},
const T& upper_bound = T{1});
/*******************************************************************************
*
* FUNCTION DEFINITIONS
*
******************************************************************************/
/* All constructors simply delegate to the std::vector base class. */
template<typename T>
Vector<T>::Vector()
{
/* nothing needs to be done here */
}
template<typename T>
Vector<T>::Vector(const std::initializer_list<T> init) : BaseVector(init)
{
/* nothing needs to be done here */
}
template<typename T>
Vector<T>::Vector(const size_type n) : BaseVector(n)
{
/* nothing needs to be done here */
}
template<typename T>
Vector<T>::Vector(const size_type n, const T& x) : BaseVector(n, x)
{
/* nothing needs to be done here */
}
template<typename T>
template<typename InputIterator>
Vector<T>::Vector(const InputIterator first, const InputIterator last)
: BaseVector(first, last)
{
/* nothing needs to be done here */
}
/* Scales every element of the vector by the scalar c (in place). */
template<typename T>
Vector<T>& Vector<T>::operator*=(const T& c)
{
    for (size_type i = 0; i < this->size(); ++i)
    {
        (*this)[i] *= c;
    }
    return *this;
}
/* Divides every element of the vector by the scalar c (in place). */
template<typename T>
Vector<T>& Vector<T>::operator/=(const T& c)
{
    for (auto it = this->begin(); it != this->end(); ++it)
    {
        *it /= c;
    }
    return *this;
}
/* Element-wise addition-assignment; both vectors must be equally sized
 * (checked with FREEAML_ASSERT in debug builds). */
template<typename T>
Vector<T>& Vector<T>::operator+=(const Vector<T>& v)
{
    FREEAML_ASSERT((*this).size() == v.size());
    auto src = v.begin();
    for (T& element : *this)
    {
        element += *src;
        ++src;
    }
    return *this;
}
/* Element-wise subtraction-assignment; both vectors must be equally sized
 * (checked with FREEAML_ASSERT in debug builds). */
template<typename T>
Vector<T>& Vector<T>::operator-=(const Vector<T>& v)
{
    FREEAML_ASSERT((*this).size() == v.size());
    size_type i = 0;
    for (T& element : *this)
    {
        element -= v[i];
        ++i;
    }
    return *this;
}
template<typename T>
T Vector<T>::l1_norm() const
{
T norm{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread partial sum, merged under the critical section below */
T local_norm{};
#pragma omp for nowait
for (size_type i = 0; i < (*this).size(); ++i)
{
local_norm += std::abs((*this)[i]);
}
/* "nowait" lets each thread merge as soon as its chunk is done */
#pragma omp critical
{
norm += local_norm;
}
}
#else
/* serial implementation */
for (const T& x : *this)
{
norm += std::abs(x);
}
#endif /* #ifdef _OPENMP */
return norm;
}
template<typename T>
T Vector<T>::l2_norm() const
{
/* "norm" accumulates the sum of |x|^2; the square root is taken at the end */
T norm{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread partial sum, merged under the critical section below */
T local_norm = T{};
#pragma omp for nowait
for (size_type i = 0; i < (*this).size(); ++i)
{
local_norm += std::abs((*this)[i]) * std::abs((*this)[i]);
}
#pragma omp critical
{
norm += local_norm;
}
}
#else
/* serial implementation */
for (const T& x : *this)
{
norm += std::abs(x) * std::abs(x);
}
#endif /* #ifdef _OPENMP */
return std::sqrt(norm);
}
template<typename T>
T Vector<T>::lp_norm(const T& p) const
{
/* "norm" accumulates sum(|x|^p); the final result is its p-th root */
T norm{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread partial sum, merged under the critical section below */
T local_norm{};
#pragma omp for nowait
for (size_type i = 0; i < (*this).size(); ++i)
{
local_norm += std::pow(std::abs((*this)[i]), p);
}
#pragma omp critical
{
norm += local_norm;
}
}
#else
/* serial implementation */
for (const T& x : *this)
{
norm += std::pow(std::abs(x), p);
}
#endif /* #ifdef _OPENMP */
return std::pow(norm, T{1} / p);
}
template<typename T>
T Vector<T>::linf_norm() const
{
T norm{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread maximum, merged with the global maximum below */
T local_norm{};
#pragma omp for nowait
for (size_type i = 0; i < (*this).size(); ++i)
{
local_norm = std::max(local_norm, std::abs((*this)[i]));
}
#pragma omp critical
{
norm = std::max(local_norm, norm);
}
}
#else
/* serial implementation */
for (const T& x : *this)
{
norm = std::max(norm, std::abs(x));
}
#endif /* #ifdef _OPENMP */
return norm;
}
template<typename T>
T Vector<T>::sum() const
{
T sum{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread partial sum, merged under the critical section below */
T local_sum{};
#pragma omp for nowait
for (size_type i = 0; i < (*this).size(); ++i)
{
local_sum += (*this)[i];
}
#pragma omp critical
{
sum += local_sum;
}
}
#else
/* serial implementation */
for (const T& x : *this)
{
sum += x;
}
#endif /* #ifdef _OPENMP */
return sum;
}
/* Arithmetic mean of the elements; an empty vector yields T{} (guard
 * against division by zero). */
template<typename T>
T Vector<T>::mean() const
{
    if ((*this).empty())
    {
        return T{};
    }
    return sum() / static_cast<T>((*this).size());
}
/* Vector-times-scalar: returns a scaled copy, delegating to operator*=. */
template<typename T>
Vector<T> operator*(const Vector<T>& v, const T& c)
{
    Vector<T> scaled(v);
    scaled *= c;
    return scaled;
}
/* Scalar-times-vector: same result as v * c (scalar multiplication
 * commutes), implemented via operator*= on a copy. */
template<typename T>
Vector<T> operator*(const T& c, const Vector<T>& v)
{
    Vector<T> scaled(v);
    scaled *= c;
    return scaled;
}
template<typename T>
T operator*(const Vector<T>& v1, const Vector<T>& v2)
{
FREEAML_ASSERT(v1.size() == v2.size());
using size_type = typename Vector<T>::size_type;
T dot_product{};
#ifdef _OPENMP
#pragma omp parallel
{
/* per-thread partial dot product, merged under the critical section */
T local_dot_product{};
#pragma omp for nowait
for (size_type i = 0; i < v1.size(); ++i)
{
local_dot_product += v1[i] * v2[i];
}
#pragma omp critical
{
dot_product += local_dot_product;
}
}
#else
/* serial implementation */
for (size_type i = 0; i < v1.size(); ++i)
{
dot_product += v1[i] * v2[i];
}
#endif /* #ifdef _OPENMP */
return dot_product;
}
/* Vector-divided-by-scalar: returns a divided copy via operator/=. */
template<typename T>
Vector<T> operator/(const Vector<T>& v, const T& c)
{
    Vector<T> quotient(v);
    quotient /= c;
    return quotient;
}
/* Element-wise vector addition; sizes must match (asserted in debug). */
template<typename T>
Vector<T> operator+(const Vector<T>& v1, const Vector<T>& v2)
{
    FREEAML_ASSERT(v1.size() == v2.size());
    Vector<T> total(v1);
    total += v2;
    return total;
}
/* Element-wise vector subtraction; sizes must match (asserted in debug). */
template<typename T>
Vector<T> operator-(const Vector<T>& v1, const Vector<T>& v2)
{
    FREEAML_ASSERT(v1.size() == v2.size());
    Vector<T> difference(v1);
    difference -= v2;
    return difference;
}
/* Element-wise negation; builds the result without reallocations. */
template<typename T>
Vector<T> operator-(const Vector<T>& v)
{
    Vector<T> negated;
    negated.reserve(v.size());
    for (typename Vector<T>::size_type i = 0; i < v.size(); ++i)
    {
        negated.push_back(-v[i]);
    }
    return negated;
}
/* Writes the vector as "[e0, e1, ..., eN]" to the stream. */
template<typename T>
std::ostream& operator<<(std::ostream& stream, const Vector<T>& v)
{
    stream << "[";
    const char* separator = "";
    for (const T& x : v)
    {
        stream << separator << x;
        separator = ", ";
    }
    stream << "]";
    return stream;
}
/* Samples n values uniformly from [lower_bound, upper_bound] using a
 * randomly-seeded Mersenne Twister; intended for primitive numeric T. */
template<typename T>
Vector<T> random_vector(const typename Vector<T>::size_type n,
                        const T& lower_bound /* = T{0} */,
                        const T& upper_bound /* = T{1} */)
{
    FREEAML_ASSERT(lower_bound < upper_bound);
    /* integral T -> uniform_int_distribution; floating-point T ->
       uniform_real_distribution */
    using DistributionType =
        typename std::conditional<std::is_integral<T>::value,
                                  std::uniform_int_distribution<T>,
                                  std::uniform_real_distribution<T>>::type;
    std::random_device device;
    std::mt19937_64 generator(device());
    DistributionType distribution(lower_bound, upper_bound);
    Vector<T> samples;
    samples.reserve(n);
    for (typename Vector<T>::size_type i = 0; i < n; ++i)
    {
        samples.push_back(distribution(generator));
    }
    return samples;
}
} /* namespace freeaml */
|
GB_binop__eq_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((node))
// C+=B function (dense accum): GB (_Cdense_accumB__eq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc64)
// C=scalar+B GB (_bind1st__eq_fc64)
// C=scalar+B' GB (_bind1st_tran__eq_fc64)
// C=A+scalar GB (_bind2nd__eq_fc64)
// C=A'+scalar GB (_bind2nd_tran__eq_fc64)
// C type: bool
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_eq (aij, bij)
// Type and operator macros consumed by the generated kernels below and by the
// shared add/emult/dense templates they include.
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = (creal (Ax [pA]) != 0) || (cimag (Ax [pA]) != 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = (creal (Bx [pB]) != 0) || (cimag (Bx [pB]) != 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC64_eq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FC64 || GxB_NO_EQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: this kernel only applies to accumulating binary ops.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop bodies come from the shared dense template, specialized via the
// GB_* macros defined above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  For this operator
// the generator emitted the kernel disabled (#if 0), so the function is a
// no-op that reports success; the generic path handles this case instead.
GrB_Info GB (_Cdense_accumB__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  As with accumB above,
// the kernel body is generated disabled (#if 0) for this operator, so the
// call is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__eq_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled placeholder: colscale (C = A*D) is not generated for eq_fc64,
// so the generator emits the body under #if 0 with a "(none)" name.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled placeholder: rowscale (C = D*B) is not generated for eq_fc64.
// Fixed "(node)" -> "(none)" so this placeholder matches the colscale
// placeholder above and the generator's naming convention for disabled
// kernels (dead code under #if 0, but kept consistent for regeneration
// diffs and grep-ability).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the eq_fc64 operator.  Workspaces
// for slicing M, A, and B are declared here and freed by GB_FREE_WORK;
// the add template does all of the numerical work.
GrB_Info GB (_AaddB__eq_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the eq_fc64 operator;
// the meta file selects the variant based on the sparsity/mask arguments.
GrB_Info GB (_AemultB_01__eq_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects at compile time whether the flipxy
// runtime flag must be honored (non-commutative ops with no flipped
// variant) or can be ignored (EQ is commutative, so the #else path holds).
GrB_Info GB (_AemultB_02__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full; the M slicing drives the parallelism.
GrB_Info GB (_AemultB_03__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B; the
// bitmap template dispatches on ewise_method and the mask arguments.
GrB_Info GB (_AemultB_bitmap__eq_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply eq_fc64 with the scalar bound as the first
// argument, Cx [k] = (x == Bx [k]), skipping entries absent from the
// bitmap Bb.  Cx and Bx may be aliased; each k writes only position k,
// so aliasing is safe.
GrB_Info GB (_bind1st__eq_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the op only where the bitmap marks an entry as present
        if (GBB (Bb, k))
        { 
            Cx [k] = GB_FC64_eq (x, Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply eq_fc64 with the scalar bound as the second
// argument, Cx [k] = (Ax [k] == y), skipping entries absent from the
// bitmap Ab.  Cx and Ax may be aliased; each k writes only position k,
// so aliasing is safe.
GrB_Info GB (_bind2nd__eq_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the op only where the bitmap marks an entry as present
        if (GBB (Ab, k))
        { 
            Cx [k] = GB_FC64_eq (Ax [k], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_eq (x, aij) ; \
}
// C = op (x, A'): transpose A and apply eq_fc64 with the scalar bound as
// the first argument.  GB_ATYPE is temporarily redefined because the
// transpose template reads "A" entries that are the op's SECOND input;
// the trailing redefinition restores the same value for later includes.
GrB_Info GB (_bind1st_tran__eq_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_eq (aij, y) ; \
}
// C = op (A', y): transpose A and apply eq_fc64 with the scalar bound as
// the second argument; GB_CAST_OP (defined just above) supplies the
// per-entry computation to the transpose template.
GrB_Info GB (_bind2nd_tran__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__div_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_fp32
// A.*B function (eWiseMult): GB_AemultB__div_fp32
// A*D function (colscale): GB_AxD__div_fp32
// D*A function (rowscale): GB_DxB__div_fp32
// C+=B function (dense accum): GB_Cdense_accumB__div_fp32
// C+=b function (dense accum): GB_Cdense_accumb__div_fp32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_fp32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_fp32
// C=scalar+B GB_bind1st__div_fp32
// C=scalar+B' GB_bind1st_tran__div_fp32
// C=A+scalar GB_bind2nd__div_fp32
// C=A'+scalar GB_bind2nd_tran__div_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij / bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x / y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FP32 || GxB_NO_DIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense, using fp32 division; the
// accum variant has no GB_DISABLE guard, and the template does all work.
void GB_Cdense_ewise3_accum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, applying fp32 division
// entry-wise via the template.
GrB_Info GB_Cdense_ewise3_noaccum__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using the
// div_fp32 operator; the slice arrays partition B's entries over tasks.
GrB_Info GB_Cdense_accumB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.  The inner block
// returns GrB_SUCCESS itself; the trailing return after the block is
// unreachable boilerplate emitted by the code generator.
GrB_Info GB_Cdense_accumb__div_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, dividing
// fp32 entries; the colscale meta file provides the body.
GrB_Info GB_AxD__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, dividing
// fp32 entries; the rowscale meta file provides the body.
GrB_Info GB_DxB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the div_fp32 operator; the add
// template does all of the numerical work.
GrB_Info GB_AaddB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the div_fp32 operator; the
// emult template does all of the numerical work.
GrB_Info GB_AemultB__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply fp32 division with the scalar bound as the
// numerator, Cx [k] = x / Bx [k] for every entry.  Cx and Bx may be
// aliased; each k writes only position k, so aliasing is safe.
GrB_Info GB_bind1st__div_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        Cx [k] = (x / Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply fp32 division with the scalar bound as the
// denominator, Cx [k] = Ax [k] / y for every entry.  Cx and Ax may be
// aliased; each k writes only position k, so aliasing is safe.
GrB_Info GB_bind2nd__div_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        Cx [k] = (Ax [k] / y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x / aij) ; \
}
// C = op (x, A'): transpose A and apply fp32 division with the scalar as
// the numerator.  GB_ATYPE is temporarily redefined because the transpose
// template reads "A" entries that are the op's SECOND input; the trailing
// redefinition restores the same value for later includes.
GrB_Info GB_bind1st_tran__div_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij / y) ; \
}
// C = op (A', y): transpose A and apply fp32 division with the scalar as
// the denominator; GB_CAST_OP (defined just above) supplies the per-entry
// computation to the transpose template.
GrB_Info GB_bind2nd_tran__div_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread pixel staging buffers created by
  AcquirePixelThreadSet(), then release the array itself.  Returns the
  relinquished array pointer so callers can reset their own pointer in a
  single assignment: pixels=DestroyPixelThreadSet(pixels).
*/
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
  register ssize_t
    n;

  assert(pixels != (MagickPixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetOpenMPMaximumThreads(); n++)
  {
    if (pixels[n] == (MagickPixelPacket *) NULL)
      continue;
    pixels[n]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[n]);
  }
  pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/*
  Allocate one scratch row of MagickPixelPacket per OpenMP thread, each
  long enough to hold max(image->columns,number_images) pixels, with every
  element initialized via GetMagickPixelPacket().  Returns NULL on
  allocation failure (any partial allocation is released first).

  Improvement: the buffer length depends only on the image and
  number_images, so it is now computed once before the loop instead of
  being recomputed on every iteration.
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
  const size_t number_images)
{
  register ssize_t
    i,
    j;

  MagickPixelPacket
    **pixels;

  size_t
    length,
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  /* zero the table so DestroyPixelThreadSet() can run on a partial set */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  length=image->columns;
  if (length < number_images)
    length=number_images;
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length,
      sizeof(**pixels));
    if (pixels[i] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
    for (j=0; j < (ssize_t) length; j++)
      GetMagickPixelPacket(image,&pixels[i][j]);
  }
  return(pixels);
}
/*
  Return the larger of two doubles.
*/
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator for MagickPixelPacket: orders pixels by decreasing
  intensity (a brighter pixel sorts before a darker one).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *color_1,
    *color_2;

  int
    intensity_1,
    intensity_2;

  color_1=(const MagickPixelPacket *) x;
  color_2=(const MagickPixelPacket *) y;
  intensity_1=(int) MagickPixelIntensity(color_1);
  intensity_2=(int) MagickPixelIntensity(color_2);
  return(intensity_2-intensity_1);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Return the smaller of two doubles.
*/
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  Apply a single evaluate operator to one quantum.  Arithmetic is done in
  MagickRealType; the caller is responsible for clamping the result back
  into the quantum range.  For the *NoiseEvaluateOperator cases, `value`
  is the noise attenuation factor passed to GenerateDifferentialNoise().
  Unknown operators fall through the switch and return 0.0.

  NOTE(review): for MeanEvaluateOperator and MedianEvaluateOperator this
  routine only performs the per-image accumulation step (pixel+value);
  the averaging / median selection is presumably done by the caller
  (see EvaluateImages) -- confirm against callers.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
  Quantum pixel,const MagickEvaluateOperator op,const MagickRealType value)
{
  MagickRealType
    result;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(MagickRealType) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a
        positive result.  It differs from % or fmod() which returns a
        'truncated modulus' result, where floor() is replaced by trunc()
        and could return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value+0.5 rounds the operand to the nearest integer before the
         bitwise op; likewise for Or, Xor, and the shift operators below */
      result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor is treated as 1.0 to avoid division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
        pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
      break;
    }
    case LogEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
        pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulation step only; see NOTE(review) above */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulation step only; see NOTE(review) above */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
        (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Apply the evaluate operator `op` with operand `value` to every channel
  of the image; a convenience wrapper that forwards to
  EvaluateImageChannel() with CompositeChannels.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
const Image
*next;
Image
*evaluate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**restrict evaluate_pixels,
zero;
RandomInfo
**restrict random_info;
size_t
number_images;
ssize_t
y;
/*
    Ensure the images are the same size.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
if ((next->columns != images->columns) || (next->rows != images->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ImageWidthsOrHeightsDiffer","`%s'",images->filename);
return((Image *) NULL);
}
/*
Initialize evaluate next attributes.
*/
evaluate_image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (evaluate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(evaluate_image,DirectClass) == MagickFalse)
{
InheritException(exception,&evaluate_image->exception);
evaluate_image=DestroyImage(evaluate_image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images,number_images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
evaluate_image=DestroyImage(evaluate_image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireCacheView(evaluate_image);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
for (y=0; y < (ssize_t) evaluate_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
evaluate_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireCacheView(next);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelOpacity(p),op,evaluate_pixel[i].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
if (evaluate_image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(
evaluate_pixel[i/2].opacity));
else
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (evaluate_image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+i,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
evaluate_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
for (y=0; y < (ssize_t) evaluate_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict evaluate_indexes;
register ssize_t
i,
x;
register MagickPixelPacket
*evaluate_pixel;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
evaluate_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
image_view=AcquireCacheView(next);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelOpacity(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (evaluate_image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=QuantumScale;
evaluate_pixel[x].green*=QuantumScale;
evaluate_pixel[x].blue*=QuantumScale;
evaluate_pixel[x].opacity*=QuantumScale;
evaluate_pixel[x].index*=QuantumScale;
}
}
for (x=0; x < (ssize_t) evaluate_image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
if (evaluate_image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (evaluate_image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
evaluate_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
evaluate_image=DestroyImage(evaluate_image);
return(evaluate_image);
}
/*
  EvaluateImageChannel() applies an arithmetic, relational, or logical
  operator, with the given constant value, to the selected channels of the
  image in place.

    o image: the image (promoted to DirectClass; pixels are modified).
    o channel: mask of channels to operate on.
    o op: the evaluate operator to apply.
    o value: the operator's constant operand.
    o exception: return any errors or warnings in this structure.

  Returns MagickTrue on success, MagickFalse if the storage class could not
  be set or a pixel region could not be read or written.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
const ChannelType channel,const MagickEvaluateOperator op,const double value,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
/* One RandomInfo per thread; presumably consumed by noise-style operators
   inside ApplyEvaluateOperator() -- confirm there. */
RandomInfo
**restrict random_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Pixels are rewritten below, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireCacheView(image);
/* Rows are processed in parallel; status and progress are shared across
   threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
/* An OpenMP loop cannot break early: once any row fails, the remaining
   iterations are skipped via this shared flag. */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Apply the operator per requested channel, clamping back into quantum
   range. */
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelRed(q),op,value)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelGreen(q),op,value)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
GetPixelBlue(q),op,value)));
if ((channel & OpacityChannel) != 0)
{
/* Matte images store alpha rather than opacity, hence the two accessors. */
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelOpacity(q),op,value)));
else
SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],(Quantum) GetPixelAlpha(q),op,value)));
}
/* The index (black) channel lives in a separate queue and may be absent. */
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator(
random_info[id],GetPixelIndex(indexes+x),op,value)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress accounting: progress++ and the monitor callback are
   not thread-safe. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies an arithmetic, relational, or logical function to
% an image. Use these operations to lighten or darken an image, to increase
% or decrease contrast in an image, or to produce the "negative" of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the requested channel function for a single
  quantum sample and returns the result clamped to the quantum range.
  Parameters that are not supplied fall back to the defaults documented
  per function below.  The exception argument is currently unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    result;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: coefficients are supplied highest order first, e.g.
        c0*x^3 + c1*x^2 + c2*x + c3, evaluated by Horner's rule on the
        normalized pixel value.
      */
      register ssize_t
        i;

      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel + parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid: parameters are frequency, phase (degrees), amplitude,
        and bias, in that order.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=number_parameters >= 1 ? parameters[0] : 1.0;
      phase=number_parameters >= 2 ? parameters[1] : 0.0;
      amplitude=number_parameters >= 3 ? parameters[2] : 0.5;
      bias=number_parameters >= 4 ? parameters[3] : 0.5;
      result=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin: parameters are width, center, range, and bias; inputs
        outside the valid arcsin domain are pegged at the range limits.
      */
      double
        bias,
        center,
        range,
        width;

      width=number_parameters >= 1 ? parameters[0] : 1.0;
      center=number_parameters >= 2 ? parameters[1] : 0.5;
      range=number_parameters >= 3 ? parameters[2] : 1.0;
      bias=number_parameters >= 4 ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel - center);
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan: parameters are slope, center, range, and bias.
      */
      double
        bias,
        center,
        range,
        slope;

      slope=number_parameters >= 1 ? parameters[0] : 1.0;
      center=number_parameters >= 2 ? parameters[1] : 0.5;
      range=number_parameters >= 3 ? parameters[2] : 1.0;
      bias=number_parameters >= 4 ? parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        result) + bias ) );
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() applies the channel function to every composite channel;
  it is a convenience wrapper around FunctionImageChannel().
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() applies one of the channel functions (polynomial,
  sinusoid, arcsin, or arctan) with the given parameters to the selected
  channels of the image, in place.

    o image: the image (promoted to DirectClass; pixels are modified).
    o channel: mask of channels to operate on.
    o function: the channel function to apply.
    o number_parameters: the number of entries in parameters.
    o parameters: the function's parameters.
    o exception: return any errors or warnings in this structure.

  Returns MagickTrue on success, MagickFalse if the storage class could not
  be set or a pixel region could not be read or written.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
const ChannelType channel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Pixels are rewritten below, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
/* Rows are processed in parallel; status and progress are shared. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/* Cannot break out of an OpenMP loop; skip remaining rows on failure. */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* ApplyFunction() already clamps its result, so no ClampToQuantum here. */
if ((channel & RedChannel) != 0)
SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
number_parameters,parameters,exception));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
number_parameters,parameters,exception));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
number_parameters,parameters,exception));
if ((channel & OpacityChannel) != 0)
{
/* Matte images store alpha rather than opacity, hence the two accessors. */
if (image->matte == MagickFalse)
SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
number_parameters,parameters,exception));
else
SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
number_parameters,parameters,exception));
}
/* The index (black) channel lives in a separate queue and may be absent. */
if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
number_parameters,parameters,exception));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress accounting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FunctionImageChannel)
#endif
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() returns the extrema over all composite channels; it is
  a convenience wrapper around GetImageChannelExtrema().
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception);
  return(status);
}
/*
  GetImageChannelExtrema() returns the integral extrema of one or more
  image channels: the continuous channel range is computed first and then
  rounded to the nearest whole quantum values.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&minimum,&maximum,exception);
  /*
    ceil(x-0.5) and floor(x+0.5) round to the nearest integer.
  */
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() returns the mean and standard deviation over all composite
  channels; it is a convenience wrapper around GetImageChannelMean().
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
/*
  GetImageChannelMean() returns the mean and standard deviation of the
  selected image channels, averaged over every channel the mask selects.

    o image: the image.
    o channel: mask of channels to include; opacity only counts when the
      image has a matte, index only when the colorspace is CMYK.
    o mean: on return, the average value of the selected channels.
    o standard_deviation: on return, their pooled standard deviation.
    o exception: return any errors or warnings in this structure.

  Returns MagickFalse only if the underlying statistics could not be
  computed.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  /*
    Accumulate the selected channels into the composite slot.  The variance
    contribution is computed as .variance minus mean^2 -- presumably
    .variance stores the raw second moment; confirm in
    GetImageChannelStatistics().
  */
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].variance-
        channel_statistics[RedChannel].mean*
        channel_statistics[RedChannel].mean;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].variance-
        channel_statistics[GreenChannel].mean*
        channel_statistics[GreenChannel].mean;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].variance-
        channel_statistics[BlueChannel].mean*
        channel_statistics[BlueChannel].mean;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[OpacityChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].variance-
        channel_statistics[OpacityChannel].mean*
        channel_statistics[OpacityChannel].mean;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].variance-
        channel_statistics[BlackChannel].mean*
        channel_statistics[BlackChannel].mean;
      channels++;
    }
  /*
    Guard against a mask that selects no channels (e.g. OpacityChannel on an
    image without a matte): dividing by zero would yield NaN results.  In
    that case the composite mean and deviation remain 0.0.
  */
  if (channels != 0)
    {
      channel_statistics[CompositeChannels].mean/=channels;
      channel_statistics[CompositeChannels].standard_deviation=
        sqrt(channel_statistics[CompositeChannels].standard_deviation/
        channels);
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() returns the kurtosis and skewness over all composite
  channels; it is a convenience wrapper around GetImageChannelKurtosis().
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() returns the excess kurtosis and skewness of the
  selected image channels, pooled over every selected sample.

    o image: the image.
    o channel: mask of channels to include; index only counts when the
      colorspace is CMYK.
    o kurtosis: on return, the excess kurtosis (normal distribution == 0).
    o skewness: on return, the skewness.
    o exception: return any errors or warnings in this structure.

  Both outputs remain 0.0 when the standard deviation is zero (constant
  data).  Returns MagickFalse if a pixel row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power,
    variance;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  /*
    Accumulate the first four raw moments over every selected channel
    sample; area counts the number of samples.
  */
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          mean+=GetPixelOpacity(p);
          sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p);
          sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
          sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p)*GetPixelOpacity(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          mean+=GetPixelIndex(indexes+x);
          sum_squares+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_fourth_power+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  /*
    Floating-point cancellation in E[x^2]-E[x]^2 can produce a marginally
    negative variance; clamp it so sqrt() does not return NaN, which would
    otherwise pass the != 0.0 test below and poison both outputs.
  */
  variance=sum_squares-(mean*mean);
  if (variance < 0.0)
    variance=0.0;
  standard_deviation=sqrt(variance);
  if (standard_deviation != 0.0)
    {
      /*
        Central moments from raw moments: kurtosis is normalized by sigma^4
        with 3 subtracted (excess kurtosis); skewness by sigma^3.
      */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageRange() returns the range over all composite channels; it is a
  convenience wrapper around GetImageChannelRange().
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,
    exception);
  return(status);
}
/*
  GetImageChannelRange() scans every pixel and returns the minimum and
  maximum values found in the selected channels.  Opacity only participates
  when the image has a matte; the index channel only when the colorspace is
  CMYK.  Returns MagickFalse if a pixel row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    sample;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Seed the extrema with sentinels so the first real sample replaces them.
  */
  *maxima=(-1.0E-37);
  *minima=1.0E+37;
  GetMagickPixelPacket(image,&sample);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&sample);
      if ((channel & RedChannel) != 0)
        {
          *minima=sample.red < *minima ? (double) sample.red : *minima;
          *maxima=sample.red > *maxima ? (double) sample.red : *maxima;
        }
      if ((channel & GreenChannel) != 0)
        {
          *minima=sample.green < *minima ? (double) sample.green : *minima;
          *maxima=sample.green > *maxima ? (double) sample.green : *maxima;
        }
      if ((channel & BlueChannel) != 0)
        {
          *minima=sample.blue < *minima ? (double) sample.blue : *minima;
          *maxima=sample.blue > *maxima ? (double) sample.blue : *maxima;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          *minima=sample.opacity < *minima ? (double) sample.opacity : *minima;
          *maxima=sample.opacity > *maxima ? (double) sample.opacity : *maxima;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) GetPixelIndex(indexes+x) < *minima)
            *minima=(double) GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > *maxima)
            *maxima=(double) GetPixelIndex(indexes+x);
        }
      p++;
    }
  }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
if (channel_statistics == (ChannelStatistics *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-1.0E-37);
channel_statistics[i].minima=1.0E+37;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelRed(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelGreen(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelBlue(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelOpacity(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelOpacity(p),range),range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
status=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range) ?
MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
if (image->matte != MagickFalse)
{
if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double)
GetPixelOpacity(p);
if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double)
GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)*
GetPixelOpacity(p);
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
}
x++;
p++;
}
}
area=(double) image->columns*image->rows;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].sum/=area;
channel_statistics[i].sum_squared/=area;
channel_statistics[i].sum_cubed/=area;
channel_statistics[i].sum_fourth_power/=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].standard_deviation=sqrt(
channel_statistics[i].variance-(channel_statistics[i].mean*
channel_statistics[i].mean));
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].variance/=channels;
channel_statistics[CompositeChannels].standard_deviation=
sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
if (channel_statistics[i].standard_deviation == 0.0)
continue;
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
2.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
6.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation)-3.0;
}
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
/*
  One skip-list node per possible 16-bit sample value; index 65536 is the
  sentinel that anchors the circular level-0 chain.
*/
typedef struct _ListNode
{
  size_t
    next[9],     /* forward links, one per skip-list level (at most 9 levels) */
    count,       /* how many neighborhood samples share this value */
    signature;   /* node is live only when this matches PixelList.signature */
} ListNode;

typedef struct _SkipList
{
  ssize_t
    level;       /* highest level currently in use */

  ListNode
    *nodes;      /* 65537 nodes: one per 16-bit value plus the sentinel */
} SkipList;

/*
  Per-thread working state for StatisticImageChannel(): one sorted sample
  list per channel for the current pixel neighborhood.
*/
typedef struct _PixelList
{
  size_t
    length,      /* samples per neighborhood (width*height) */
    seed,        /* linear-congruential PRNG state for node levels */
    signature;

  SkipList
    lists[ListChannels];
} PixelList;
/*
  DestroyPixelList() releases the per-channel node arrays and then the
  pixel list itself; returns NULL for convenient assignment.
*/
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *) RelinquishMagickMemory(
      pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
/*
  DestroyPixelListThreadSet() destroys every per-thread pixel list, then
  the pointer array; returns NULL for convenient assignment.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    thread;

  assert(pixel_list != (PixelList **) NULL);
  for (thread=0; thread < (ssize_t) GetOpenMPMaximumThreads(); thread++)
  {
    if (pixel_list[thread] == (PixelList *) NULL)
      continue;
    pixel_list[thread]=DestroyPixelList(pixel_list[thread]);
  }
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
/*
  AcquirePixelList() allocates a zero-initialized pixel list sized to a
  width x height neighborhood; returns NULL on allocation failure.
*/
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  ssize_t
    channel;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    /*
      65537 nodes: one per possible 16-bit sample plus the sentinel (65536).
    */
    pixel_list->lists[channel].nodes=(ListNode *) AcquireQuantumMemory(65537UL,
      sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,65537UL*
      sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}
/*
  AcquirePixelListThreadSet() allocates one pixel list per OpenMP worker
  thread; the whole set is torn down if any single allocation fails.
*/
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  size_t
    number_threads;

  ssize_t
    thread;

  number_threads=GetOpenMPMaximumThreads();
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    pixel_list[thread]=AcquirePixelList(width,height);
    if (pixel_list[thread] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
/*
  AddNodePixelList() links sample value 'color' into the given channel's
  skip-list with an initial count of 1.  Classic skip-list insertion: find
  the predecessors at each level, pick a random height, splice in.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list.
  */
  search=65536UL;  /* start from the sentinel node */
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;  /* rightmost node < color at this level */
  }
  /*
    Generate a pseudo-random level for this node.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    /* promote one more level with probability 1/4 (both 0x300 bits set) */
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;  /* clamp to the next[] array bound */
  if (level > (list->level+2))
    level=list->level+2;  /* grow the list height by at most two levels */
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  GetMaximumPixelList() stores, per channel, the largest 16-bit sample in
  the current neighborhood into 'pixel' (rescaled to Quantum).
*/
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    maximum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the maximum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel anchors the circular level-0 chain */
    count=0;
    maximum=list->nodes[color].next[0];
    /*
      Walk the sorted level-0 links until every sample in the neighborhood
      is accounted for; the last distinct value visited is the maximum.
    */
    do
    {
      color=list->nodes[color].next[0];
      if (color > maximum)
        maximum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMeanPixelList() stores, per channel, the arithmetic mean of the
  neighborhood samples into 'pixel' (rescaled to Quantum).
*/
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  MagickRealType
    sum;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the mean value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* start at the sentinel node */
    count=0;
    sum=0.0;
    do
    {
      color=list->nodes[color].next[0];
      /* each distinct value contributes value*count to the total */
      sum+=(MagickRealType) list->nodes[color].count*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMedianPixelList() stores, per channel, the median neighborhood sample
  into 'pixel' (rescaled to Quantum).
*/
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the median value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* start at the sentinel node */
    count=0;
    /*
      Walk the sorted values, accumulating counts, until the cumulative
      count passes the midpoint of the neighborhood.
    */
    do
    {
      color=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) color;
  }
  /*
    NOTE(review): only this walker re-initializes 'pixel' via
    GetMagickPixelPacket(); its siblings do not -- confirm intended.
  */
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMinimumPixelList() stores, per channel, the smallest 16-bit sample in
  the current neighborhood into 'pixel' (rescaled to Quantum).
*/
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    minimum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the minimum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    count=0;
    color=65536UL;  /* sentinel anchors the circular level-0 chain */
    /* the first linked node holds the smallest value in a sorted list */
    minimum=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      if (color < minimum)
        minimum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) minimum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetModePixelList() stores, per channel, the most frequent neighborhood
  sample (the mode) into 'pixel' (rescaled to Quantum).
*/
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* start at the sentinel node */
    mode=color;
    max_count=list->nodes[mode].count;
    count=0;
    /* track the value with the highest count while walking all samples */
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
        {
          mode=color;
          max_count=list->nodes[mode].count;
        }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetNonpeakPixelList() stores, per channel, the median sample -- unless
  the median sits at either extreme of the sorted value chain, in which
  case its inward neighbor is used instead.
*/
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel anchors the circular level-0 chain */
    next=list->nodes[color].next[0];
    count=0;
    /* locate the median, remembering its neighbors on both sides */
    do
    {
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    /* median is the smallest value: step up; the largest: step down */
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetStandardDeviationPixelList() stores, per channel, the standard
  deviation of the neighborhood samples into 'pixel' (rescaled to Quantum),
  using the E[x^2]-E[x]^2 identity.
*/
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* start at the sentinel node */
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        Accumulate count*color^2 in one step rather than looping once per
        duplicate sample: O(1) per distinct value instead of O(count).
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  InsertPixelList() folds one pixel into the per-channel skip-lists: each
  sample is scaled to 16 bits and either bumps the count of a live node or
  is linked in as a new node.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  /* a live node carries the list's current signature; else (re)insert it */
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  /*
    List 4 (black/index) is populated even for non-CMYK images -- reusing
    the opacity-derived index -- so the Get*PixelList() walkers, which
    iterate all 5 channels, always terminate.
  */
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
/*
  MagickAbsoluteValue() returns |x|.
*/
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  return(x < 0 ? -x : x);
}
/*
  ResetPixelList() empties each channel's skip-list: the height drops to
  level 0 and every level of the sentinel node (65536) points back at
  itself, forming an empty ring.  Bumping the signature invalidates all
  existing nodes at once; the old signature reseeds the level PRNG.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register ListNode
    *sentinel;

  register SkipList
    *list;

  register ssize_t
    channel;

  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    sentinel=list->nodes+65536UL;
    list->level=0;
    for (level=0; level < 9; level++)
      sentinel->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
/*
  StatisticImage() is a convenience wrapper that applies the requested
  statistic to the default channels.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  StatisticImageChannel() replaces each pixel of the selected channels with
  the chosen statistic (min/max/mean/median/mode/...) of its width x height
  neighborhood, returning a new image (NULL on failure).
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **restrict pixel_list;

  ssize_t
    neighbor_height,
    neighbor_width;

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /* a zero width/height requests an automatically chosen kernel size */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  /* one PixelList per OpenMP thread so rows can be processed in parallel */
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  statistic_view=AcquireCacheView(statistic_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict statistic_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the source row padded by half a kernel on each side; the
      virtual-pixel policy supplies out-of-bounds samples.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const IndexPacket
        *restrict s;

      register const PixelPacket
        *restrict r;

      register ssize_t
        u,
        v;

      r=p;
      s=indexes+x;
      /* rebuild the per-channel skip-lists for the neighborhood of (x,y) */
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        /* advance one row within the padded virtual-pixel window */
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      GetMagickPixelPacket(image,&pixel);
      /*
        Seed 'pixel' from a sample near the neighborhood center so channels
        not selected by 'channel' retain a source value.
        NOTE(review): the offset neighbor_width*neighbor_height/2 does not
        account for the padded row stride -- confirm intended.
      */
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+
        neighbor_width*neighbor_height/2+x,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;

          /* gradient = per-channel |max - min| over the neighborhood */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      /* write back only the channels the caller selected */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  return(statistic_image);
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
/* One triplet of a 3-component color transform. */
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    An image whose identified type is a gray type is reported as
    GRAYColorspace regardless of the colorspace it is tagged with;
    otherwise the tagged colorspace is returned as-is.
  */
  if (IsGrayImageType(IdentifyImageType(image,exception)))
    return(GRAYColorspace);
  return(image->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Adobe RGB (1998) -> sRGB, routed through CIE XYZ.
*/
static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertAdobe98ToXYZ(r,g,b,&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
/*
  Display P3 -> sRGB, routed through CIE XYZ.
*/
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertDisplayP3ToXYZ(r,g,b,&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
/*
  ProPhoto RGB -> sRGB, routed through CIE XYZ.
*/
static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertProPhotoToXYZ(r,g,b,&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
/*
  CMY is the complement of RGB, scaled into [0,1].
*/
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=(QuantumRange-red)*QuantumScale;
  *magenta=(QuantumRange-green)*QuantumScale;
  *yellow=(QuantumRange-blue)*QuantumScale;
}
/*
  sRGB -> Adobe RGB (1998), routed through CIE XYZ.
*/
static void ConvertRGBToAdobe98(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToAdobe98(cie_x,cie_y,cie_z,r,g,b);
}
/*
  sRGB -> Display P3, routed through CIE XYZ.
*/
static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToDisplayP3(cie_x,cie_y,cie_z,r,g,b);
}
static void ConvertRGBToProPhoto(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
const double z,double *L,double *M,double *S)
{
*L=0.7328*x+0.4296*y-0.1624*z;
*M=(-0.7036*x+1.6975*y+0.0061*z);
*S=0.0030*x+0.0136*y+0.9834*z;
}
static void ConvertRGBToLMS(const double red,const double green,
const double blue,double *L,double *M,double *S)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,L,M,S);
}
static void ConvertRGBToLuv(const double red,const double green,
const double blue,const IlluminantType illuminant,double *L,double *u,
double *v)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
const double blue,double *low_x,double *low_y,double *cap_Y)
{
double
gamma,
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
gamma=PerceptibleReciprocal(X+Y+Z);
*low_x=gamma*X;
*low_y=gamma*Y;
*cap_Y=Y;
}
static void inline ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
double
azz,
bzz,
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
gamma=Jz+Jzazbz_d0;
Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
azz=az-0.5;
bzz=bz-0.5;
Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
gamma=pow(Lp,1.0/Jzazbz_p);
L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Mp,1.0/Jzazbz_p);
M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Sp,1.0/Jzazbz_p);
S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
*X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
*Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
*Z=Zp;
}
static void ConvertRGBToJzazbz(const double red,const double green,
const double blue,const double white_luminance,double *Jz,double *az,
double *bz)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,blue,green,&X,&Y,&Z);
ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
const double bz,const double white_luminance,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,blue,green);
}
static void ConvertRGBToYDbDr(const double red,const double green,
const double blue,double *Y,double *Db,double *Dr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
*Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
static void ConvertRGBToYIQ(const double red,const double green,
const double blue,double *Y,double *I,double *Q)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
static void ConvertRGBToYPbPr(const double red,const double green,
const double blue,double *Y,double *Pb,double *Pr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
*Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
static void ConvertRGBToYCbCr(const double red,const double green,
const double blue,double *Y,double *Cb,double *Cr)
{
ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
const double blue,double *Y,double *U,double *V)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
*V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158*
DecodePixelGamma(GetPixelGreen(image,q))+0.072186*
DecodePixelGamma(GetPixelBlue(image,q));
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case Adobe98Colorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from sRGB to target colorspace.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case Adobe98Colorspace:
{
ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
break;
}
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case DisplayP3Colorspace:
{
ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case JzazbzColorspace:
{
ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case ProPhotoColorspace:
{
ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
PerceptibleReciprocal(film_gamma)))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) DecodePixelGamma((MagickRealType)
GetPixelRed(image,q));
green=(double) DecodePixelGamma((MagickRealType)
GetPixelGreen(image,q));
blue=(double) DecodePixelGamma((MagickRealType)
GetPixelBlue(image,q));
SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
q);
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
x_map[i].y=(-0.003296)*i;
x_map[i].z=0.009410*i;
y_map[i].x=0.010566*i;
y_map[i].y=(-0.006471)*i;
y_map[i].z=(-0.007880)*i;
z_map[i].x=0.002052*i;
z_map[i].y=0.009768*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
x_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].x=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
z_map[i].y=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
unsigned int
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(image,q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(image,q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(image,q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
primary_info.z;
SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
unsigned int
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
ImageType
type;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->colorspace == colorspace)
return(MagickTrue);
image->colorspace=colorspace;
image->rendering_intent=UndefinedIntent;
image->gamma=1.000/2.200;
(void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
type=image->type;
if (IsGrayColorspace(colorspace) != MagickFalse)
{
if (colorspace == LinearGRAYColorspace)
image->gamma=1.000;
type=GrayscaleType;
}
else
if ((IsRGBColorspace(colorspace) != MagickFalse) ||
(colorspace == XYZColorspace) || (colorspace == xyYColorspace))
image->gamma=1.000;
else
{
image->rendering_intent=PerceptualIntent;
image->chromaticity.red_primary.x=0.6400;
image->chromaticity.red_primary.y=0.3300;
image->chromaticity.red_primary.z=0.0300;
image->chromaticity.green_primary.x=0.3000;
image->chromaticity.green_primary.y=0.6000;
image->chromaticity.green_primary.z=0.1000;
image->chromaticity.blue_primary.x=0.1500;
image->chromaticity.blue_primary.y=0.0600;
image->chromaticity.blue_primary.z=0.7900;
image->chromaticity.white_point.x=0.3127;
image->chromaticity.white_point.y=0.3290;
image->chromaticity.white_point.z=0.3583;
}
status=SyncImagePixelCache(image,exception);
image->type=type;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
% MagickBooleanType SetImageGray(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
ExceptionInfo *exception)
{
const char
*value;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageGray(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
if (IsStringFalse(value) != MagickFalse)
return(MagickFalse);
type=IdentifyImageGray(image,exception);
if (type == UndefinedType)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
ExceptionInfo *exception)
{
MagickBooleanType
is_bilevel;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageMonochrome(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
is_bilevel=IdentifyImageMonochrome(image,exception);
if (is_bilevel == MagickFalse)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
image->type=BilevelType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Same colorspace: just (re)tag the image, no pixel transform.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Any embedded ICC/ICM profile no longer describes the pixel data.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Target is sRGB: a single conversion from the source colorspace.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  /*
    Otherwise route through sRGB: convert to sRGB first when the source
    is not already sRGB, then on to the requested colorspace.
  */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    if (TransformsRGBImage(image,exception) == MagickFalse)
      return(MagickFalse);
  return(sRGBTransformImage(image,colorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  double
    b,
    g,
    r;

  /*
    CMY is the complement of RGB: channel = (1 - ink), scaled to the
    quantum range.
  */
  r=QuantumRange*(1.0-cyan);
  g=QuantumRange*(1.0-magenta);
  b=QuantumRange*(1.0-yellow);
  *red=r;
  *green=g;
  *blue=b;
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  double
    x,
    y,
    z;

  /*
    Map an LMS triplet to XYZ via a fixed 3x3 linear transform (the
    inverse of the matrix used in the forward XYZ->LMS conversion).
  */
  x=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
  *X=x;
  *Y=y;
  *Z=z;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    x,
    y,
    z;

  /*
    Compose the two-step conversion: LMS -> XYZ -> RGB.
  */
  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    l_star,
    u_star,
    v_star,
    X,
    Y,
    Z;

  /*
    The channels arrive packed into [0,1]; unpack to the native L*u*v*
    ranges (L in [0,100], u in [-134,220], v in [-140,122]) before
    converting through XYZ to RGB.
  */
  l_star=100.0*L;
  u_star=354.0*u-134.0;
  v_star=262.0*v-140.0;
  ConvertLuvToXYZ(l_star,u_star,v_star,illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  /*
    Round to the nearest integer, clamped to the YCC map index range
    [0, 1388].
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    a_star,
    b_star,
    l_star,
    X,
    Y,
    Z;

  /*
    The channels arrive packed into [0,1]; unpack to the native L*a*b*
    ranges (L in [0,100], a and b in [-127.5,127.5]) before converting
    through XYZ to RGB.
  */
  l_star=100.0*L;
  a_star=255.0*(a-0.5);
  b_star=255.0*(b-0.5);
  ConvertLabToXYZ(l_star,a_star,b_star,illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    reciprocal,
    X,
    Y,
    Z;

  /*
    Recover XYZ from the chromaticity coordinates (x,y) and luminance Y;
    PerceptibleReciprocal() guards against a zero y denominator.
  */
  reciprocal=PerceptibleReciprocal(low_y);
  X=reciprocal*cap_Y*low_x;
  Y=cap_Y;
  Z=reciprocal*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  double
    pb,
    pr;

  /*
    Pb and Pr arrive offset into [0,1]; re-center them about zero before
    applying the inverse matrix and scaling to the quantum range.
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  /*
    YCbCr uses the same inverse transform as YPbPr here; the channels
    are packed identically, so delegate directly.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  double
    i,
    q;

  /*
    I and Q arrive offset into [0,1]; re-center them about zero before
    applying the inverse matrix and scaling to the quantum range.
  */
  i=I-0.5;
  q=Q-0.5;
  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  double
    db,
    dr;

  /*
    Db and Dr arrive offset into [0,1]; re-center them about zero before
    applying the inverse matrix and scaling to the quantum range.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  double
    u,
    v;

  /*
    U and V arrive offset into [0,1]; re-center them about zero before
    applying the inverse matrix and scaling to the quantum range.
  */
  u=U-0.5;
  v=V-0.5;
  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158*
EncodePixelGamma(GetPixelGreen(image,q))+0.072186*
EncodePixelGamma(GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
        Transform GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma))-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
GB_binop__plus_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int16)
// A*D function (colscale): GB (_AxD__plus_int16)
// D*A function (rowscale): GB (_DxB__plus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int16)
// C=scalar+B GB (_bind1st__plus_int16)
// C=scalar+B' GB (_bind1st_tran__plus_int16)
// C=A+scalar GB (_bind2nd__plus_int16)
// C=A'+scalar GB (_bind2nd_tran__plus_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT16 || GxB_NO_PLUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The numeric loop lives in the
// included template, specialized by the GB_* macros defined above
// (GB_CTYPE, GB_BINOP, ...).
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here -- presumably the caller checks; confirm.
void GB (_Cdense_ewise3_accum__plus_int16)
(
GrB_Matrix C,           // input/output: accumulated in place
const GrB_Matrix A,     // input matrix (dense)
const GrB_Matrix B,     // input matrix (dense)
const int nthreads      // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulator).
// Returns GrB_NO_VALUE when this operator is disabled at compile time
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int16)
(
GrB_Matrix C,           // output: dense, fully overwritten by the template
const GrB_Matrix A,     // input matrix (dense)
const GrB_Matrix B,     // input matrix (dense)
const int nthreads      // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing / B_ntasks / B_nthreads describe how the caller sliced
// B's entries into parallel tasks.
GrB_Info GB (_Cdense_accumB__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
// p_bwork points at the scalar, passed as untyped GB_void and cast below.
GrB_Info GB (_Cdense_accumb__plus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads      // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;  // NOTE(review): unreachable -- generator artifact, harmless
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale -- multiply each column j of A by the diagonal
// entry D(j,j), using the PLUS op as the "multiplier".
GrB_Info GB (_AxD__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,  // pattern-only: values of A unused
const GrB_Matrix D, bool D_is_pattern,  // pattern-only: values of D unused
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale -- multiply each row i of B by the diagonal entry
// D(i,i), using the PLUS op as the "multiplier".
GrB_Info GB (_DxB__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,  // pattern-only: values of D unused
const GrB_Matrix B, bool B_is_pattern,  // pattern-only: values of B unused
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  The result pattern is the
// set union of A and B; the C_to_* maps and TaskList describe the
// parallel decomposition computed by the caller.
GrB_Info GB (_AaddB__plus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,         // optional mask (may be NULL)
const bool Mask_struct,     // use only the structure of M, not its values
const bool Mask_comp,       // use the complemented mask !M
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,        // C's hyperlist equals M's hyperlist
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here is released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result
// C is sparse or hypersparse (set intersection of A and B patterns).
GrB_Info GB (_AemultB_08__plus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,         // optional mask (may be NULL)
const bool Mask_struct,     // use only the structure of M, not its values
const bool Mask_comp,       // use the complemented mask !M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#M> = A.*B when A is sparse/hyper and B is
// bitmap/full.  For PLUS, GB_BINOP_FLIP is 0 (commutative), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,          // if true, compute z = fmult(y,x) instead
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where the mask M is sparse/hyper and
// both A and B are bitmap/full.
GrB_Info GB (_AemultB_04__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,     // use only the structure of M, not its values
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is
// held in bitmap form.
GrB_Info GB (_AemultB_bitmap__plus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,         // optional mask (may be NULL)
const bool Mask_struct,     // use only the structure of M, not its values
const bool Mask_comp,       // use the complemented mask !M
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = x + bij to every entry of B, with the scalar
// bound to the first argument.  Cx and Bx may alias, so no restrict is
// placed on those pointers.
GrB_Info GB (_bind1st__plus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int16_t xval = (*((const int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap (no-op when Bb is NULL)
        if (!GBB (Bb, k)) continue ;
        int16_t bval = GBX (Bx, k, false) ;
        Cx [k] = (xval + bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = aij + y to every entry of A, with the scalar
// bound to the second argument.  Cx and Ax may alias, so no restrict is
// placed on those pointers.
GrB_Info GB (_bind2nd__plus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int16_t yval = (*((const int16_t *) y_input)) ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap (no-op when Ab is NULL)
        if (!GBB (Ab, k)) continue ;
        int16_t aval = GBX (Ax, k, false) ;
        Cx [k] = (aval + yval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply the binary op with the scalar x
// bound to the first argument.  The included GB_unop_transpose.c consumes
// GB_ATYPE and the GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__plus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply the binary op with the scalar y
// bound to the second argument.  The included GB_unop_transpose.c consumes
// GB_ATYPE and the GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__plus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
histogram(parallel).c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
int find_bin(float data,float bin_maxes[],int bin_count ,float min_meas );
void printHistogram(float bin_maxes[],int bin_counts[],int bin_count ,float min_meas);
/*
 * Build a 5-bin histogram of 20 hard-coded measurements in parallel.
 *
 * Fixes vs. the original:
 *  - clock() sums CPU time over all OpenMP threads, which misreports the
 *    runtime of parallel code; omp_get_wtime() measures wall-clock time.
 *  - malloc() result is now checked.
 *  - the explicit "#pragma omp barrier" after the init loop was redundant
 *    ("omp for" already ends with an implicit barrier) and is removed.
 *  - find_bin()'s result is range-checked before indexing bin_counts
 *    (guards against out-of-range data / a failed lookup).
 *  - the single-increment critical section is replaced with "omp atomic".
 */
int main(int argc, char* argv[]) {
    double begin, end;          /* wall-clock timestamps */
    double runtime;
    int data_count = 20;
    float data[] = {1.3,2.9,0.4,0.3,1.3,4.4,1.7,0.4,3.2,0.3,4.9,2.4,3.1,4.4,3.9,0.4,4.2,4.5,4.9,0.9};
    float min_meas = 0.3;
    float max_meas = 4.9;
    int bin_count = 5, i;
    float bin_maxes[5];
    int* bin_counts;
    float bin_width = (max_meas - min_meas)/bin_count;

    /* Clock starts */
    begin = omp_get_wtime();

    bin_counts = malloc(bin_count*sizeof(int));
    if (bin_counts == NULL) {
        fprintf(stderr, "Error: could not allocate bin_counts\n");
        return 1;
    }

#pragma omp parallel num_threads(10)
    {
        /* initialize bin boundaries and zero the counts
           (implicit barrier at the end of this "omp for") */
#pragma omp for
        for (i = 0; i < bin_count; i++) {
            bin_maxes[i] = min_meas + (i+1)*bin_width;
            bin_counts[i] = 0;
        }

        int bin;    /* private per thread */
#pragma omp for
        for (i = 0; i < data_count; i++) {
            bin = find_bin(data[i], bin_maxes, bin_count, min_meas);
            if (bin >= 0 && bin < bin_count) {
                /* atomic is sufficient (and cheaper than critical)
                   for a single shared increment */
#pragma omp atomic
                bin_counts[bin]++;
            }
        }
    }

    printHistogram(bin_maxes, bin_counts, bin_count, min_meas);
    free(bin_counts);

    /* Clock ends */
    end = omp_get_wtime();
    runtime = end - begin;
    printf("\n\nRuntime : %f\n\n", runtime);
    return 0;
}
/*
 * Binary-search for the bin containing `data`.
 * Bin b spans [bin_maxes[b-1], bin_maxes[b]) with bin 0 starting at
 * min_meas; the top edge bin_maxes[bin_count-1] is INCLUSIVE in the
 * last bin.
 *
 * Returns the bin index in [0, bin_count-1], or -1 if data < min_meas.
 *
 * Fix: the original fell off the end of the function without a return
 * (undefined behavior) whenever data >= bin_maxes[bin_count-1] -- which
 * actually happens for data == max_meas (the 4.9 values in main's data
 * set).  Values at or above the top edge now map to the last bin.
 */
int find_bin(float data, float bin_maxes[], int bin_count, float min_meas) {
    int bottom = 0, top = bin_count - 1;
    int mid;
    float bin_max, bin_min;

    /* inclusive upper edge: max_meas (and anything above) -> last bin */
    if (data >= bin_maxes[bin_count - 1])
        return bin_count - 1;

    while (bottom <= top) {
        mid = (bottom + top) / 2;
        bin_max = bin_maxes[mid];
        bin_min = (mid == 0) ? min_meas : bin_maxes[mid - 1];
        if (data >= bin_max)
            bottom = mid + 1;
        else if (data < bin_min)
            top = mid - 1;
        else
            return mid;
    }
    return -1;  /* data below min_meas: no bin */
}
/*
 * Print the histogram: one row per bin, formatted as
 * "<bin_min>-<bin_max>:" followed by one 'X' per counted measurement.
 * Bin 0's lower edge is min_meas; every other bin starts at the previous
 * bin's upper edge.
 */
void printHistogram(float bin_maxes[], int bin_counts[], int bin_count, float min_meas) {
    int b, n;
    for (b = 0; b < bin_count; b++) {
        float hi = bin_maxes[b];
        float lo = (b == 0) ? min_meas : bin_maxes[b - 1];
        printf("%.3f-%.3f:\t", lo, hi);
        for (n = bin_counts[b]; n > 0; n--)
            printf("X");
        printf("\n");
    }
}
|
ROF_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ROF_TV_core.h"
#define EPS 1.0e-8
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
/* Sign of x: returns +1 for positive, -1 for negative, 0 for zero. */
int sign(float x) {
    if (x > 0.0f) return 1;
    if (x < 0.0f) return -1;
    return 0;
}
/* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. lambda - regularisation parameter (a constant or the same size as the input (1))
* 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED]
* 4. Number of iterations, for explicit scheme >= 150 is recommended [REQUIRED]
* 5. eplsilon: tolerance constant
*
* Output:
* [1] Regularised image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
*/
/* Running iterations of TV-ROF function */
/*
 * Run iterationsNumb explicit-scheme iterations of ROF total-variation
 * denoising on Input (2D when dimZ == 1, 3D otherwise), writing the
 * regularized result to Output.
 *
 * lambdaPar      - regularization parameter (scalar, or per-voxel array
 *                  when lambda_is_arr != 0)
 * tau            - marching step of the explicit scheme
 * epsil          - tolerance for early stopping; 0 disables the check
 * infovector[0]  - number of iterations actually performed
 * infovector[1]  - relative change ||u_k - u_{k-5}|| / ||u_k|| at the
 *                  last tolerance check (0 if never checked)
 *
 * Fixes vs. the original:
 *  - calloc() results are now checked; on failure the work buffers are
 *    released and the function returns without touching Output further
 *    (the original dereferenced NULL inside the difference kernels).
 *  - i is initialized, so infovector[0] is well-defined even when
 *    iterationsNumb <= 0 (the original read an uninitialized i).
 */
float TV_ROF_CPU_main(float *Input, float *Output, float *infovector, float *lambdaPar, int lambda_is_arr, int iterationsNumb, float tau, float epsil, int dimX, int dimY, int dimZ)
{
    float *D1 = NULL, *D2 = NULL, *D3 = NULL, *Output_prev = NULL;
    float re = 0.0f, re1 = 0.0f;
    int count = 0;
    int i = 0;
    long j, DimTotal = (long)(dimX*dimY*dimZ);

    D1 = calloc(DimTotal, sizeof(float));
    D2 = calloc(DimTotal, sizeof(float));
    D3 = calloc(DimTotal, sizeof(float));
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));

    /* bail out on any allocation failure instead of dereferencing NULL */
    if ((D1 == NULL) || (D2 == NULL) || (D3 == NULL) ||
        ((epsil != 0.0f) && (Output_prev == NULL))) {
        free(D1); free(D2); free(D3); free(Output_prev);
        infovector[0] = 0.0f;
        infovector[1] = 0.0f;
        return 0;
    }

    /* initial guess: copy the noisy input into the output buffer */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    /* main TV iterations */
    for (i = 0; i < iterationsNumb; i++) {

        /* snapshot the solution every 5 iterations for the tolerance check */
        if ((epsil != 0.0f) && (i % 5 == 0))
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* regularized forward/backward differences along each axis */
        D1_func(Output, D1, (long)(dimX), (long)(dimY), (long)(dimZ));
        D2_func(Output, D2, (long)(dimX), (long)(dimY), (long)(dimZ));
        if (dimZ > 1) D3_func(Output, D3, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* explicit update step */
        TV_kernel(D1, D2, D3, Output, Input, lambdaPar, lambda_is_arr, tau, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* early stopping: stop after the relative change has been below
           epsil more than 3 times */
        if ((epsil != 0.0f) && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for (j = 0; j < DimTotal; j++) {
                re += powf(Output[j] - Output_prev[j], 2);
                re1 += powf(Output[j], 2);
            }
            re = sqrtf(re)/sqrtf(re1);
            if (re < epsil) count++;
            if (count > 3) break;
        }
    }

    free(D1); free(D2); free(D3);
    free(Output_prev);  /* free(NULL) is a no-op when epsil == 0 */

    /* report iterations performed and the last reached tolerance */
    infovector[0] = (float)(i);
    infovector[1] = re;
    return 0;
}
/* calculate differences 1 */
/*
 * Compute the first regularized difference field used by the ROF update:
 * for each voxel, D1 = (forward difference along the j/x axis) / T1, where
 * T1 = sqrt(dx+^2 + minmod(dy)^2 [+ minmod(dz)^2 in 3D] + EPS) and
 * minmod(d) = 0.5*(sign(d+)+sign(d-)) * min(|d+|,|d-|).
 * Symmetric (Neumann) boundary conditions are applied at the edges.
 *
 * Fix: the 3D branch used double-precision sqrt(); changed to sqrtf() to
 * match the 2D branch and D2_func -- all data here is single precision.
 *
 * Returns D1[0] (kept for interface compatibility; callers ignore it).
 */
float D1_func(float *A, float *D1, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T1;
    long i,j,k,i1,i2,k1,j1,j2,k2,index;
    if (dimZ > 1) {
#pragma omp parallel for shared (A, D1, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1,NOMy_1,NOMy_0,NOMz_1,NOMz_0,denom1,denom2,denom3,T1)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;
                k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                k2 = k - 1; if (k2 < 0) k2 = k+1;
                /* Forward-backward differences */
                NOMx_1 = A[(dimX*dimY)*k + j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[(dimX*dimY)*k + j*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(dimX*dimY)*k + j*dimX + i2]; /* y- */
                NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                NOMz_0 = A[index] - A[(dimX*dimY)*k2 + j*dimX + i]; /* z- */
                denom1 = NOMx_1*NOMx_1;
                /* minmod of forward/backward differences along y and z */
                denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                denom2 = denom2*denom2;
                denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
                denom3 = denom3*denom3;
                /* sqrtf (not sqrt): single precision, matches 2D branch */
                T1 = sqrtf(denom1 + denom2 + denom3 + EPS);
                D1[index] = NOMx_1/T1;
            }}}
    }
    else {
#pragma omp parallel for shared (A, D1, dimX, dimY) private(i, j, i1, j1, i2, j2,NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1,index)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* symmetric boundary conditions (Neumann) */
            i1 = i + 1; if (i1 >= dimX) i1 = i-1;
            i2 = i - 1; if (i2 < 0) i2 = i+1;
            j1 = j + 1; if (j1 >= dimY) j1 = j-1;
            j2 = j - 1; if (j2 < 0) j2 = j+1;
            /* Forward-backward differences */
            NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
            NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
            NOMy_0 = A[index] - A[(j)*dimX + i2]; /* y- */
            denom1 = NOMx_1*NOMx_1;
            denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
            denom2 = denom2*denom2;
            T1 = sqrtf(denom1 + denom2 + EPS);
            D1[index] = NOMx_1/T1;
        }}
    }
    return *D1;
}
/* calculate differences 2 */
float D2_func(float *A, float *D2, long dimX, long dimY, long dimZ)
{
/*
 Second normalised-difference field of the anisotropic TV denoising scheme.
 For every voxel the forward difference labelled "y+" is divided by the
 square root of a smoothness estimate built from forward/backward
 differences combined via 0.5*(sign(d+)+sign(d-))*MIN(|d+|,|d-|)
 (a minmod-style limiter).
 A        : input volume, row-major (i fastest), dimX*dimY*dimZ floats
 D2       : output buffer of the same size, fully overwritten
 dimX/Y/Z : volume dimensions; dimZ == 1 selects the 2-D code path
 returns  : first element of D2 (by-product; value appears incidental)
 Requires file-level helpers/macros: sign(), MIN(), EPS.
*/
float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
long i,j,k,i1,i2,k1,j1,j2,k2,index;
if (dimZ > 1) {
/* 3-D case: each iteration writes only D2[index] and reads A, so the
   parallel loop has no write conflicts. */
#pragma omp parallel for shared (A, D2, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
NOMz_0 = A[index] - A[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */
denom1 = NOMy_1*NOMy_1;
/* limited central estimates in x and z; NOTE(review): fabs() promotes
   the float operands to double — fabsf() would avoid that (behaviour kept) */
denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
denom3 = denom3*denom3;
/* EPS keeps the denominator strictly positive */
T2 = sqrtf(denom1 + denom2 + denom3 + EPS);
D2[index] = NOMy_1/T2;
}}}
}
else {
/* 2-D case (single slice): same scheme without the z terms */
#pragma omp parallel for shared (A, D2, dimX, dimY) private(i, j, i1, j1, i2, j2, NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2,index)
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = j*dimX+i;
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
/* Forward-backward differences */
NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
NOMx_0 = A[index] - A[j2*dimX + i]; /* x- */
/*NOMy_0 = A[(i)*dimY + j] - A[(i)*dimY + j2]; */ /* y- */
denom1 = NOMy_1*NOMy_1;
denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
denom2 = denom2*denom2;
T2 = sqrtf(denom1 + denom2 + EPS);
D2[index] = NOMy_1/T2;
}}
}
return *D2;
}
/* calculate differences 3 */
float D3_func(float *A, float *D3, long dimX, long dimY, long dimZ)
{
/*
 Third normalised-difference field (z direction) of the TV denoising scheme:
 D3 = (forward z-difference) / sqrt(limited x and y estimates + EPS).
 A        : input volume, row-major (i fastest), dimX*dimY*dimZ floats
 D3       : output buffer of the same size, fully overwritten
 returns  : first element of D3 (by-product)
 NOTE(review): unlike D1/D2 there is no 2-D branch here; with dimZ == 1
 the boundary rule makes k1 = -1 and A is indexed out of bounds —
 presumably this function is only ever called with dimZ > 1; verify at
 the call sites.
 Requires file-level helpers/macros: sign(), MIN(), EPS.
*/
float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
long index,i,j,k,i1,i2,k1,j1,j2,k2;
/* each iteration writes only D3[index]; A is read-only, so the parallel
   loop is free of write conflicts */
#pragma omp parallel for shared (A, D3, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMy_0, NOMx_0, NOMz_1, denom1, denom2, denom3, T3)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
NOMy_0 = A[index] - A[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
/*NOMz_0 = A[(dimX*dimY)*k + (i)*dimY + j] - A[(dimX*dimY)*k2 + (i)*dimY + j]; */ /* z- */
denom1 = NOMz_1*NOMz_1;
/* limited central estimates in x and y */
denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
denom3 = denom3*denom3;
/* EPS keeps the denominator strictly positive */
T3 = sqrtf(denom1 + denom2 + denom3 + EPS);
D3[index] = NOMz_1/T3;
}}}
return *D3;
}
/* calculate divergence */
float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float *lambda, int lambda_is_arr, float tau, long dimX, long dimY, long dimZ)
{
/*
 Divergence / update step of the TV denoising iteration:
   B += tau * ( lambda * div(D1,D2,D3) - (B - A) )
 D1,D2,D3      : difference fields from D1_func/D2_func/D3_func
 B             : current estimate, updated in place
 A             : original (noisy) data
 lambda        : regularisation weight; lambda_is_arr == 0 reads only
                 lambda[0] (scalar), lambda_is_arr == 1 reads lambda[index]
                 per voxel (the index * lambda_is_arr trick below)
 tau           : time step
 returns       : first element of B (by-product)
 NOTE(review): i1/j1/k1 are computed but never used here (only the
 backward indices i2/j2/k2 feed the divergence); A, lambda and tau are
 not in the shared() clause and fall back to default-shared, which is
 fine since they are read-only in the region.
*/
float dv1, dv2, dv3, lambda_val;
long index,i,j,k,i1,i2,k1,j1,j2,k2;
if (dimZ > 1) {
/* 3-D case: each iteration writes only B[index] */
#pragma omp parallel for shared (D1, D2, D3, B, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, dv1,dv2,dv3,lambda_val)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
lambda_val = *(lambda + index* lambda_is_arr);
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/*divergence components */
dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i];
dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];
B[index] += tau*(lambda_val*(dv1 + dv2 + dv3) - (B[index] - A[index]));
}}}
}
else {
/* 2-D case: same update without the z component */
#pragma omp parallel for shared (D1, D2, B, dimX, dimY) private(index, i, j, i1, j1, i2, j2,dv1,dv2,lambda_val)
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = j*dimX+i;
lambda_val = *(lambda + index* lambda_is_arr);
/* symmetric boundary conditions (Neuman) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
/* divergence components */
dv1 = D1[index] - D1[j2*dimX + i];
dv2 = D2[index] - D2[j*dimX + i2];
B[index] += tau*(lambda_val*(dv1 + dv2) - (B[index] - A[index]));
}}
}
return *B;
}
|
blocking-omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include "mpi.h"
struct timeval startwtime, endwtime1, endwtime2;
double seq_time1, seq_time2;
double **minDist;
double **minLabels;
double* packer(double **received, int blocksize, int LINESIZE);
double** unpacker(double *toReceive,int blocksize, int LINESIZE);
void knnSearch(int rank, int l, double **local, double **received, int blocksize,int LINESIZE, int nbrs);
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE);
double Ndistance(double *pointA, double *pointB, int LINESIZE);
void bubbleSort(int i,int nbrs);
void swap(int i, int k);
/* Free a matrix built as an array of `rows` individually malloc'd rows
 * (the set-up code in main() and unpacker() both produce this layout).
 * NULL-safe. */
static void free_rows(double **block, int rows){
    int r;
    if (block == NULL) return;
    for(r = 0; r < rows; r++) free(block[r]);
    free(block);
}

/*
 * Distributed k-nearest-neighbour search (blocking MPI ring + OpenMP).
 * usage: prog filename MAX nbrs threads
 * Each of the p processes owns MAX/p points; blocks circulate around a
 * ring so every block meets every other, then rank 0 gathers the
 * per-point top-nbrs distances/labels and writes a text report.
 *
 * Fixes vs. the original:
 *  - MPI_File_close(&fpResults) removed: fpResults is only opened inside
 *    the commented-out output path, so closing the uninitialized handle
 *    was erroneous.
 *  - buffers returned by packer()/unpacker() are freed before each
 *    reassignment instead of leaking once per ring step.
 */
int main(int argc, char** argv){
    char filename[100];
    int MAX = 0;            /* total number of points in the search */
    int LINESIZE = 0;       /* coordinates (features) per point */
    int nbrs = 0;           /* k: neighbours kept per point */
    int i,j,l;
    int blocksize = 0;      /* points handled per MPI process */
    double **local;         /* this process's own block of points */
    double **received;      /* block currently received from a peer */
    double *toSend;         /* flat send buffer */
    double *toReceive;      /* flat receive buffer */
    int rank;
    int p; //==number of procs
    MPI_File fp;
    MPI_File fpResults;     /* used only by the commented-out output path below */
    MPI_Status status;
    MPI_Offset offset;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    if(argc!=5){
        printf("ERROR: usage:\n%s filename MAX nbrs threads\n", argv[0]);
        printf("filename is the name of the .bin file to take data from\n");
        printf("MAX is the number of elements to take part in the search\n");
        printf("nbrs is the number of the nearest neighbours search for each point\n");
        printf("threads is the number of threads to be used for OpenMP\n");
        exit(1);
    }
    MAX = atoi(argv[2]);
    nbrs = atoi(argv[3]);
    blocksize = MAX/p;      /* NOTE(review): assumes p divides MAX evenly */
    omp_set_num_threads(atoi(argv[4]));

    strcpy(filename, argv[1]);
    /* the SVD-reduced data set has 30 features per point, the raw one 784 */
    if(!strcmp(filename,"trainX_svd")){
        LINESIZE = 30;
    }
    else{
        LINESIZE = 784;
    }

    //creating blocks
    local = (double **) malloc(blocksize*sizeof(double*));
    received = (double **) malloc(blocksize*sizeof(double*));
    toSend = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    toReceive = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    for(i=0; i<blocksize; i++){
        local[i] = (double *) malloc(LINESIZE*sizeof(double));
        received[i] = (double *) malloc(LINESIZE*sizeof(double));
    }
    //initialising results array
    minDist = (double **) malloc(MAX*sizeof(double*));
    minLabels = (double **) malloc(MAX*sizeof(double*));
    for(i=0; i<MAX; i++){
        minDist[i] = (double *) malloc(nbrs*sizeof(double));
        minLabels[i] = (double *) malloc(nbrs*sizeof(double));
        for(j=0; j<nbrs; j++){
            //presetting minDist to sth very big
            minDist[i][j] = 1000;
            minLabels[i][j] = -1;
        }
    }

    strcat(filename, ".bin");
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fp)){
        printf("Error reading file from Process %d (fp)\n", rank);
        exit(1);
    }
    else{
        //block reading
        if(rank==0){
            printf("Initialising kNN search for problem size %d and k = %d\nusing archive %s\n", MAX, nbrs, filename);
        }
        /* each process reads its own contiguous block, one double at a time */
        for(i=0; i<blocksize; i++){
            for(j=0; j<LINESIZE ; j++){
                offset = rank*blocksize*LINESIZE*sizeof(double)+i*LINESIZE*sizeof(double)+j*sizeof(double);
                MPI_File_read_at(fp, offset, &local[i][j], 1, MPI_DOUBLE, &status);
            }
        }
        //blockprint(local, blocksize, rank);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_File_close(&fp);
        /* compare the local block against itself first */
        knnSearch(rank, rank, local, local, blocksize,LINESIZE, nbrs);
    }

    //2d array to 1d array:
    free(toSend);           /* packer() allocates a fresh buffer (was leaked) */
    toSend = packer(local, blocksize, LINESIZE);
    if(rank==0) gettimeofday (&startwtime, NULL);

    /* circulation of blocks: in p-1 ring steps (rank -> rank+1, last rank
       wraps to 0) every block visits every process */
    for(l=0; l<(p-1); l++){
        if(rank!=0){
            MPI_Recv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, rank-1, 10, MPI_COMM_WORLD, &status);
        }
        if(rank!=(p-1)){
            MPI_Ssend(toSend, blocksize*LINESIZE, MPI_DOUBLE, rank+1, 10, MPI_COMM_WORLD);
        }
        else{
            MPI_Ssend(toSend, blocksize*LINESIZE, MPI_DOUBLE, 0, 10, MPI_COMM_WORLD);
        }
        if(rank==0){
            MPI_Recv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, p-1, 10, MPI_COMM_WORLD, &status);
        }
        //1d to 2d array
        free_rows(received, blocksize);   /* drop the previous block (was leaked) */
        received = unpacker(toReceive, blocksize, LINESIZE);
        /* work out which rank originally owned the block just received by
           walking the ring back l steps from the sender */
        int tmp = status.MPI_SOURCE;
        for(int t=0; t<l ;t++){
            tmp--;
            if(tmp<0) tmp = p-1;
        }
        //time for blocking COMMS
        //only on the last rep will count
        //rank 0 recieves last
        if((rank==0) && (l==(p-2)) ) gettimeofday (&endwtime1, NULL);
        knnSearch(rank, tmp, local, received, blocksize, LINESIZE, nbrs);
        free(toSend);                     /* packer() allocates again (was leaked) */
        toSend = packer(received, blocksize, LINESIZE);
    }

    //preparing to send results to proc 0
    toSend = (double *) realloc(toSend, blocksize*nbrs*sizeof(double));
    toReceive = (double *) realloc(toReceive, blocksize*nbrs*sizeof(double));
    if(rank==0){
        /* gather every worker's minDist rows (tag 15) and minLabels rows
           (tag 20) into the global result tables */
        for(i=1; i<p; i++){
            MPI_Recv(toSend, blocksize*nbrs, MPI_DOUBLE, i, 15, MPI_COMM_WORLD, &status);
            free_rows(received, blocksize);
            received = unpacker(toSend, blocksize, nbrs);
            for(j=0; j<blocksize; j++){
                for(int k=0; k<nbrs; k++){
                    minDist[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
                }
            }
            MPI_Recv(toReceive, blocksize*nbrs, MPI_DOUBLE, i, 20, MPI_COMM_WORLD, &status);
            free_rows(received, blocksize);
            received = unpacker(toReceive, blocksize, nbrs);
            for(j=0; j<blocksize; j++){
                for(int k=0; k<nbrs; k++){
                    minLabels[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
                }
            }
        }
    }
    else{
        //toSend buffer used for minDist
        //toReceive buffer used for minLabels
        for(i=0; i<blocksize; i++){
            for(j=0; j<nbrs; j++){
                toSend[i*nbrs+j] = minDist[rank*blocksize+i][j];
                toReceive[i*nbrs+j] = minLabels[rank*blocksize+i][j];
            }
        }
        MPI_Send(toSend, blocksize*nbrs, MPI_DOUBLE, 0, 15, MPI_COMM_WORLD);
        MPI_Send(toReceive, blocksize*nbrs, MPI_DOUBLE, 0, 20, MPI_COMM_WORLD);
    }

    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
        strcpy(filename, "results-mpi-blocking-svd.txt");
    }
    else{
        strcpy(filename, "results-mpi-blocking.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
        printf("Error opening file from Process %d (fp)\n", rank);
        exit(1);
    }
    if(rank==0){
        gettimeofday (&endwtime2, NULL);
    }
    /*
    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
    strcpy(filename, "results-mpi-blocking-labels-svd.txt");
    }
    else{
    strcpy(filename, "results-mpi-blocking-labels.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
    printf("Error opening file from Process %d (fp)\n", rank);
    exit(1);
    }
    strcpy(filename, argv[1]);
    if(!strcmp(filename,"trainX_svd")){
    strcpy(filename, "results-mpi-blocking-dist-svd.txt");
    }
    else{
    strcpy(filename, "results-mpi-blocking-dist.txt");
    }
    if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fpResults)){
    printf("Error opening file from Process %d (fp)\n", rank);
    exit(1);
    }
    //printing results in two seperate files
    for(i=0; i<blocksize; i++){
    char buf[100];
    for(j=0; j<nbrs; j++){
    if(minLabels[rank*blocksize+i][j]==-1) printf("ERROR\n");
    //offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
    offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
    sprintf(buf, "%f ", minLabels[rank*blocksize+i][j]);
    MPI_File_write_at(fp, offset, buf, strlen(buf), MPI_CHAR, &status);
    sprintf(buf, "%f ", minDist[rank*blocksize+i][j]);
    MPI_File_write_at(fpResults, offset, buf, strlen(buf), MPI_CHAR, &status);
    //printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
    }
    }
    */
    if(rank==0){
        //printing results in a sigle file in easily readable form from proc 0 ONLY
        for(i=0; i<p*blocksize; i++){
            char buf[100];
            sprintf( buf, "Top %d closest to point %d:\n",nbrs, i);
            MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
            //printf("Top %d closest to point %d:\n",nbrs, i);
            for(j=0; j<nbrs; j++){
                if(minLabels[i][j]==-1) printf("ERROR\n");
                sprintf(buf, "#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
                MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
                //printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
            }
        }
    }
    if(rank==0){
        seq_time1 = (double)((endwtime1.tv_usec - startwtime.tv_usec)/1.0e6
        + endwtime1.tv_sec - startwtime.tv_sec);
        printf("COMMS Wall clock time = %f\n", seq_time1);
        seq_time2 = (double)((endwtime2.tv_usec - startwtime.tv_usec)/1.0e6
        + endwtime2.tv_sec - startwtime.tv_sec);
        printf("FINAL Wall clock time = %f\n", seq_time2);
        printf("\nJob Done.\n");
    }
    MPI_File_close(&fp);
    /* BUG FIX: the original also called MPI_File_close(&fpResults) here,
       but fpResults is only opened inside the commented-out block above;
       closing a never-opened MPI_File handle is erroneous. */
    free_rows(local, blocksize);      /* frees the rows too (were leaked) */
    free_rows(received, blocksize);
    free(toSend);
    free(toReceive);
    /* NOTE(review): minDist/minLabels are intentionally left for the OS
       to reclaim at exit, as in the original. */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return (0);
}
double* packer(double **received,int blocksize, int LINESIZE){
    /* Flatten a blocksize x LINESIZE matrix into one newly allocated
       row-major buffer; the caller owns the returned memory. */
    double *flat = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    int row, col;
    for(row = 0; row < blocksize; row++)
        for(col = 0; col < LINESIZE; col++)
            flat[row*LINESIZE + col] = received[row][col];
    return flat;
}
double** unpacker(double *toReceive,int blocksize, int LINESIZE){
    /* Expand a row-major buffer back into a freshly allocated
       blocksize x LINESIZE matrix; the caller owns all returned memory.
       BUG FIX: the row-pointer array must be sized with sizeof(double *);
       the original used sizeof(double), which only works by accident on
       platforms where pointers and doubles have the same size. */
    int i,j;
    double **temp;
    temp = (double **) malloc(blocksize*sizeof(double *));
    for(i=0; i<blocksize; i++){
        temp[i] = (double *) malloc(LINESIZE*sizeof(double));
        for(j=0; j<LINESIZE; j++){
            temp[i][j]= toReceive[i*LINESIZE+j];
        }
    }
    return temp;
}
void knnSearch(int rank, int l, double **local, double **received, int blocksize, int LINESIZE, int nbrs){
    /* Compare every point of `local` against every point of `received`,
       feeding each pair into pointCompare() with its global point ids
       (rank*blocksize+i vs l*blocksize+j).
       BUG FIX: the original shared one pointA/pointB buffer pair (and the
       j,k indices) across all OpenMP threads — a data race; each thread
       now owns its scratch buffers, and the buffers are freed (they were
       leaked before). Rows of minDist/minLabels written by pointCompare
       are disjoint per i, so partitioning the i-loop stays safe. */
    int i;
    #pragma omp parallel
    {
        double *pointA = (double *) malloc(LINESIZE*sizeof(double));
        double *pointB = (double *) malloc(LINESIZE*sizeof(double));
        #pragma omp for
        for(i=0; i<blocksize; i++){
            int j,k;
            //reading pointA from block local
            for(k=0; k<LINESIZE; k++){
                pointA[k]=local[i][k];
            }
            for(j=0; j<blocksize; j++){
                //reading pointB from block received
                for(k=0; k<LINESIZE; k++){
                    pointB[k]=received[j][k];
                }
                pointCompare(rank*blocksize+i, l*blocksize+j, nbrs, pointA, pointB, LINESIZE);
            }
        }
        free(pointA);
        free(pointB);
    }
}
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE){
    /* Consider point j as a neighbour candidate for point i: if its
       distance beats one of the current top-nbrs entries, insert it and
       shift the worse entries down.  A distance of exactly 0 means the
       two points coincide and is skipped. */
    int slot, shift;
    double d = Ndistance(pointA, pointB, LINESIZE);
    /* make sure row i is ordered before scanning for an insertion slot */
    bubbleSort(i, nbrs);
    for(slot = 0; slot < nbrs; slot++){
        if(d > 0 && d < minDist[i][slot]){
            /* open up position `slot` by shifting the tail right */
            for(shift = nbrs-1; shift > slot; shift--){
                minDist[i][shift] = minDist[i][shift-1];
                minLabels[i][shift] = minLabels[i][shift-1];
            }
            minDist[i][slot] = d;
            minLabels[i][slot] = j;
            break;
        }
    }
}
double Ndistance(double *pointA, double *pointB, int LINESIZE){
double dist=0;
for(int k=0; k<LINESIZE; k++){
dist += pow(pointA[k]-pointB[k],2);
}
return sqrt(dist);
}
void bubbleSort(int i,int nbrs){
    /* Ascending bubble sort of row i of the global minDist table;
       minLabels follows along via swap() so labels stay paired with
       their distances. */
    int pass, pos;
    for(pass = 0; pass < nbrs; pass++)
        for(pos = nbrs-1; pos > pass; pos--)
            if(minDist[i][pos-1] > minDist[i][pos])
                swap(i, pos);
}
void swap(int i, int k){
    /* Exchange entries k-1 and k of row i in both result tables. */
    double d   = minDist[i][k-1];
    double lab = minLabels[i][k-1];
    minDist[i][k-1]   = minDist[i][k];
    minLabels[i][k-1] = minLabels[i][k];
    minDist[i][k]   = d;
    minLabels[i][k] = lab;
}
DRB095-doall2-taskloop-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation (race-free as written):
both loops are collapsed into a single "omp for" worksharing loop and j is
explicitly private, so each a[i][j] element is written by exactly one thread.
NOTE(review): this header originally described a taskloop variant with data
race pairs on j (and the file name still says "orig-yes"); that description
does not match the code below — confirm which variant is intended.
*/
#include <stdio.h>
int a[100][100];
int main()
{
  /* Increment every cell of the global 100x100 array once, in parallel.
     The combined parallel-for construct with collapse(2) and an
     explicitly private j is equivalent to the separate parallel region
     plus worksharing loop it replaces: every (i, j) pair is executed by
     exactly one thread. */
  int i, j;
#pragma omp parallel for private(j) collapse(2)
  for (i = 0; i < 100; i++)
    for (j = 0; j < 100; j++)
      a[i][j] += 1;
  printf ("a[50][50]=%d\n", a[50][50]);
  return 0;
}
|
GB_unaryop__ainv_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_fp32
// op(A') function: GB_tran__ainv_uint32_fp32
// C type: uint32_t
// A type: float
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z ; GB_CAST_UNSIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = op (cast (Ax [p])) for p = 0..anz-1: cast float -> uint32_t
   via GB_CAST_UNSIGNED (see macro above), then AINV negates in uint32_t
   arithmetic, which wraps modulo 2^32.  Safe when Cx aliases Ax because
   iteration p touches only entry p. */
GrB_Info GB_unop__ainv_uint32_fp32
(
uint32_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
/* this operator/type combination was disabled at compile time */
return (GrB_NO_VALUE) ;
#else
int64_t p ;
/* one independent entry per iteration; static schedule over nthreads */
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A while typecasting and applying the
   unary operator.  The actual loop lives in the GB_unaryop_transpose.c
   template, instantiated here with this file's GB_* macros
   (GB_PHASE_2_OF_2 selects the second, value-filling phase). */
GrB_Info GB_tran__ainv_uint32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
/* this operator/type combination was disabled at compile time */
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
alignments_fast.c | /*-----------------------------------------------------------------------------
alignments_fast \
author: jose lezama/rafael grompone von gioi \
version: 8.1 (2014.08.12) \
year: 2014 \
desc: MatLab interface for aligned point detector v8 (w/candidate points). \
input: \
-req: input_points | Input points (Nx2 matrix). \
-req: candidate_points | Candidate points for alignments (Nx2 matrix). \
-opt: min_k | Minimum number of points in alignment. \
-opt: xsize | X axis domain size. \
-opt: ysize | Y axis domain size. \
-opt: epsilon | Detection threshold, (max. NFA). \
-opt: min_width | Minimal alignment width tested. \
-opt: locality | Size factor of locality. \
-opt: length_ratio | Min ratio b/w length and width. \
output: \
-6xNout alignments matrix with: x1, x2, y1, y2, width, nfa \
------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#include <float.h>
#include <time.h>
#include <omp.h>
#include "lib/jirafa_lib.h"
#include "lib/ntuples_aux.h"
#include "lib/alignments.h"
/** sqrt(2) */
#define SQRT2 1.414213562373
/* debug function */
#define dprint(expr) printf(#expr " = %g \n", (double) expr)
/*----------------------------------------------------------------------------*/
/* look for alignments */
/* (core of the algorithm) */
/*
 * Exhaustive search for significant point alignments (algorithm core).
 * Every pair of candidate points defines a candidate segment; for a range
 * of widths, case counts n and locality factors, the occupancy of cells
 * along the segment is turned into an NFA score.  Alignments with
 * nfa >= log_epsilon are appended to all_alignments and the overall best
 * one is tracked in best_align.
 *
 * Fixes vs. the original:
 *  - total_num_tests++ is now atomic (it is a shared counter updated by
 *    all threads — previously a data race, though debug-only);
 *  - the per-pair cells2 buffer is sized from points->size instead of a
 *    hard-coded 1000, which could overflow since cells2 is indexed up to
 *    n-1 with n <= 1.5*K <= 1.5*points->size;
 *  - a nested comment in the "significant event" branch was cleaned up.
 *
 * NOTE(review): the `cells` parameter is listed private() but never used
 * inside the region (cells2 is used instead); kept for interface
 * compatibility.  Also, free_ntuple_list(align.l) after the critical
 * section assumes copy_align()/append_alignment() deep-copy align.l —
 * confirm, otherwise stored alignments hold dangling list pointers.
 */
void detect_alignments(double logNT, double log_epsilon, ntuple_list points,
                       ntuple_list candidate_points,
                       double min_width, double locality, double length_ratio,
                       double min_k, int * cells,
                       alignments_list *all_alignments,
                       struct point_alignment *best_align){
  int i,j,l,n;
  double x,y,xc,yc,dx,dy,theta,lateral_dist,long_dist,fwd_dist;
  double width;
  struct point_alignment align;
  ntuple_list ll;

  /* initialize best_align */
  best_align->nfa = -logNT; // -DBL_MAX;
  best_align->l = NULL;

  int NlocalLeft = 0;
  int NlocalRight = 0;
  int NlocalAlignment = 0;
  int point_in_alignment=0;

  /* test all possible candidate pairs*/
  time_t mytimer;
  mytimer = time(NULL);
  int total_num_tests = 0;

#pragma omp parallel for schedule(dynamic) private(i, j, ll, align, l, n, x, y, xc, yc, dx, dy, theta, locality, lateral_dist, long_dist, fwd_dist, width, NlocalLeft, NlocalRight, NlocalAlignment, point_in_alignment, cells) shared(best_align, all_alignments, mytimer, total_num_tests)
  for(i=0; i<candidate_points->size; i++)
    for(j=i+1; j<candidate_points->size; j++)
      {
        /* if ( (int) (time(NULL)-mytimer) >3){ */
        /* printf("looking for alignments i: %d, thread: %d \n", i, omp_get_thread_num()); */
        /* mytimer = time(NULL); */
        /* } */

        /* the candidate pair gives the alignment's end points */
        align.x1 = candidate_points->values[ i*candidate_points->dim + 0 ];
        align.y1 = candidate_points->values[ i*candidate_points->dim + 1 ];
        align.x2 = candidate_points->values[ j*candidate_points->dim + 0 ];
        align.y2 = candidate_points->values[ j*candidate_points->dim + 1 ];
        align.len = dist(align.x1,align.y1,align.x2,align.y2);

        /* too short to satisfy the minimal length/width ratio at any width */
        if (align.len < length_ratio) continue;

        xc = ( align.x1 + align.x2 ) / 2.0;   /* segment centre */
        yc = ( align.y1 + align.y2 ) / 2.0;
        theta = atan2( align.y2 - align.y1, align.x2 - align.x1 );
        align.dx = dx = cos(theta);           /* unit direction vector */
        align.dy = dy = sin(theta);

        int wi;
        width = (align.len / length_ratio);   /* widest width tried */

        int * cells2;
        /* occupancy flags, one per case; sized for the largest n tested
           (max_n = 1.5*K <= 1.5*points->size) instead of the original
           hard-coded 1000 entries */
        cells2 = (int *) calloc( (size_t) (3*points->size/2 + 2), sizeof(int) );

        for(wi = 0; wi <6 ; wi++)
          {
            width/=2;//SQRT2;
            if(width < min_width) break;
            align.width = width;

            /* use around K cases where K is the number of points in the
               alignment */
            /* count number of points in alignment */
            double K=0;
            for(l=0;l<points->size;l++){
              // TODO remove endpoints
              x = points->values[ l*points->dim + 0 ];
              y = points->values[ l*points->dim + 1 ];
              lateral_dist = fabs( -(x-xc)*dy + (y-yc)*dx );
              fwd_dist = (x-align.x1)*dx + (y-align.y1)*dy;
              /* the point is in the alignment? */
              if( lateral_dist < 0.5 * width &&
                  fwd_dist > 0 &&
                  fwd_dist < align.len )
                K++;
            }
            if (K < min_k) {
              continue;
            }

            /* explore a range of number of cases around K */
            double min_n = (int) fmax(4,K/2);
            double max_n = (int) 1.5*K;
            for(n = min_n; n<=max_n; n=(int) n*SQRT2)
              {
                align.n = n;
                align.s = align.len/(double) (n+1); /* cell size */
                double loc_exp;
                // 25 sep 2013, begin with square local window
                locality = (align.len - width)/(2.0*width)*SQRT2;
                //for(loc_exp = 2; loc_exp <=32; loc_exp *= 2 ){
                for(loc_exp = 0; loc_exp <6; loc_exp++){
                  //locality = pow(2,loc_exp/2.0);
                  locality/=2; //SQRT2;
                  if (width*locality>250 || locality < 2) break;

                  /* po is the probability of 1 point falling in one case */
                  align.po = 1.0 / ( (double) n * (2.0*locality+1.0) );

                  /* /\* compute active cells and number of local points*\/ */
                  align.k = 0;
                  for(l=0;l<n;l++) cells2[l] = 0;
                  align.locality = locality;
                  align.Nlocal = 0;

                  /* new in algorithm 3: count points left and right of the
                     alignment */
                  NlocalLeft = NlocalRight = NlocalAlignment = 0;

                  /* initialize list of interior points */
                  ll = new_ntuple_list(1);
                  align.l = ll;// new_ntuple_list(1);

                  for(l=0;l<points->size;l++)
                    // TODO: remove endpoints
                    {
                      x = points->values[ l*points->dim + 0 ];
                      y = points->values[ l*points->dim + 1 ];
                      point_in_alignment = 0;
                      lateral_dist = fabs( -(x-xc)*dy + (y-yc)*dx );
                      fwd_dist = (x-align.x1)*dx + (y-align.y1)*dy;
                      long_dist = fabs( (x-xc)*dx + (y-yc)*dy );
                      /* the point is in the alignment? */
                      if( lateral_dist < 0.5 * width &&
                          fwd_dist > 0.5 * align.s &&
                          fwd_dist < ( (double) n + 0.5 ) * align.s )
                        {
                          point_in_alignment=1;
                          NlocalAlignment++;
                          cells2[ (int) (fwd_dist/align.s - 0.5) ] = 1;
                          /* point is in the alignment */
                          /* append the point */
                          append_point_to_alignment(&align, (unsigned int) l);
                          /* align.k++; */
                        }
                      /* the point is in the local area? */
                      if( lateral_dist < (locality+0.5)*width &&
                          long_dist < ( (double) n / 2.0 *align.s ) )
                        {
                          align.Nlocal++;
                          if(point_in_alignment <1)
                            if (-(x-xc)*dy + (y-yc)*dx >0)
                              NlocalLeft++;
                            else
                              NlocalRight++;
                        }
                    }

                  /* count occupied cases */
                  for(l=0;l<n;l++) align.k += cells2[l];
                  if (align.k <=0) {
                    free_ntuple_list(align.l);
                    continue;
                  }

                  /* /\* compute event probability *\/ */
                  align.Nlocal = 2*fmax(NlocalLeft,NlocalRight) + NlocalAlignment;
                  /* dprint(align.Nlocal); */
                  align.p = 1.0 - pow( 1.0 - align.po, (double) align.Nlocal );

                  if( align.p > 0.0 && align.p < 1.0 ){
                    align.nfa = nfa(n,align.k,align.p,logNT);
                    /* shared counter: atomic update avoids a data race */
#pragma omp atomic
                    total_num_tests++;
                  }
                  else {
                    align.nfa = -logNT; // -DBL_MAX;
#pragma omp atomic
                    total_num_tests++;
                  }

#pragma omp critical
                  {
                    if( align.nfa > best_align->nfa ) {
                      if(best_align->l!=NULL) free_ntuple_list(best_align->l);
                      copy_align(&align,best_align);
                    }
                    if( align.nfa >= log_epsilon ) /* if significant event */
                      /* (debug alternative: replace the condition with 1
                         to keep every tested alignment) */
                      {
                        append_alignment(all_alignments, &align);
                      }
                  }
                  free_ntuple_list(align.l);
                }
              }
          }
        free(cells2);
      }
  //printf("total real number of tests: %d\n", total_num_tests);
}
/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
/* Main */
/*----------------------------------------------------------------------------*/
/* Stand-alone entry point: the real functionality is exposed through
   mexFunction_alignment_fast() below; this stub only satisfies the
   linker and does nothing with its arguments. */
int main(int argc, char ** argv)
{
return 0;
}
/*----------------------------------------------------------------------------*/
/* Matlab interface */
/*----------------------------------------------------------------------------*/
extern void mexFunction_alignment_fast(double * input_points, int X_in, int N_in,double * input_candidate_points,int NCP_in, double ** output_points, int *X_out, int *N_out);
void mexFunction_alignment_fast(double * input_points, int X_in, int N_in,double * input_candidate_points,int NCP_in, double ** output_points, int *X_out, int *N_out){
/*
input: list of 2D points and candidates
output: 7xN matrix with info for N alignment detections
*/
double xsize;
double ysize;
double epsilon;
double min_width;
double locality;
double min_k;
double length_ratio;
min_k = 5;
xsize = 512;
ysize = 512;
epsilon = 10;
min_width = 1;
locality = 10;
length_ratio = 25;
/* variable declarations here */
// double * input_points = mxGetPr(prhs[0]); // input points of size 2*N first N
// x coordinates, then N y
// coordinates
// double * input_candidate_points = mxGetPr(prhs[1]); // input points of size 2*N first N
// x coordinates, then N y
// coordinates
int X = X_in; //number of columns (should be 2)
int N = N_in; // number of points
int NCP = NCP_in; // number of candidate points
omp_set_num_threads(256);
struct point_alignment align, best_align;
/* initiate alignements list */
alignments_list *all_alignments = (alignments_list*)calloc(sizeof(alignments_list), 1);
all_alignments->array = (struct point_alignment*)realloc(all_alignments->array, (all_alignments->capacity = 4) * sizeof(struct point_alignment));
ntuple_list points;
ntuple_list candidate_points;
double x,y,xc,yc,dx,dy,theta,lateral_dist,long_dist,fwd_dist;
double width;
unsigned int i,j,l,n;
int num_test = 0;
double logNT;
int * cells;
FILE * eps;
time_t timer;
/* read input */
points = new_ntuple_list(2);
for(i=0;i<N;i++){
if( points->size >= points->max_size ) enlarge_ntuple_list(points);
points->values[ points->size * points->dim + 0 ] = input_points[i];
points->values[ points->size * points->dim + 1 ] = input_points[N+i];
points->size++;
}
candidate_points = new_ntuple_list(2);
for(i=0;i<NCP;i++){
if( candidate_points->size >= candidate_points->max_size ) enlarge_ntuple_list(candidate_points);
// printf("candidate_point_values[%d]=[%f %f]\n", i , input_candidate_points[i], input_candidate_points[NCP+i]);
candidate_points->values[ candidate_points->size * candidate_points->dim + 0 ] = input_candidate_points[i];
candidate_points->values[ candidate_points->size * candidate_points->dim + 1 ] = input_candidate_points[NCP+i];
candidate_points->size++;
}
n=points->size;
/* NUMBER OF TESTS */
num_test= n * (n-1)/2; /* widhts are ignored */
//printf("points: %d\n", n);
//printf("num_test: %d\n", num_test);
logNT = log10( (double) num_test );
double log_epsilon = log10(epsilon);
/* initialize best_align */
best_align.nfa = -logNT;
int nmax = dist(xsize,ysize,0.0,0.0) / min_width;
cells = (int *) calloc( (size_t) nmax, sizeof(int) );
if( cells == NULL ) error("not enough memory.");
/*--------------- detect alignments -------------------------------------- */
//printf("detecting alignments...\n");
timer = time(NULL);
detect_alignments(logNT, log_epsilon, points, candidate_points, min_width, locality, length_ratio,
min_k, cells, all_alignments, &best_align);
//printf("\nfinished detecting alignments...\n");
//printf("time elapsed: %d seconds\n", (int) (time(NULL)-timer));
dprint(best_align.nfa);
/*--------------- end detect alignments------------------------------------ */
/* sort alignements */
qsort ((*all_alignments).array, all_alignments->pos,
sizeof(struct point_alignment), compare_alignments);
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* 1-vs-1 exclusion principle: start a list of alignments with the
most significant one, then one by one check if they are masked */
/* IMPORTANT: all_alignments are ordered backwards */
/* create the list */
//printf("applying exclusion principle on resulting %d alingments...\n", all_alignments->pos);
timer = time(NULL);
alignments_list *f1v1_alignments = (alignments_list*)calloc(sizeof(alignments_list), 1);
/* it is possible that there are no alignments*/
if (all_alignments->pos != 0){
/* append the first alignment to this list */
append_alignment(f1v1_alignments, &(all_alignments->array[all_alignments->pos-1]));
int ismasked;
signed int ai;
for(ai=(*all_alignments).pos-2;ai>=1;ai--){
struct point_alignment temp_alignment_b = all_alignments->array[ai];
ismasked=0;
for(j=0;j<(*f1v1_alignments).pos;j++){
struct point_alignment temp_alignment_a = f1v1_alignments->array[j];
/* check if alignments from final list mask alignment in
all-alignments list */
double im=is_masked_cases(points, &temp_alignment_a,
&temp_alignment_b, logNT);
if (im<=log_epsilon) ismasked=1;
}
if (ismasked ==0){ /* not masked, add to final list */
append_alignment(f1v1_alignments, &temp_alignment_b);
}
}
//printf( "number of final alignments after 1vs1: %d \n",(int) (*f1v1_alignments).pos);
}
//printf("\nfinished exclusion principle...\n");
//printf("time elapsed: %d seconds\n", (int) (time(NULL)-timer));
/* END of 1 vs 1 exclusion principle */
/* ------------------------------------------------------------------------ */
/*----------------- MATLAB: prepare output variables --------------------*/
/* output variables definitions */
int lw = 6; // x1,y1,x2,y2,width,-log10(NFA)
int n_out = (*f1v1_alignments).pos;
// plhs[0] = mxCreateDoubleMatrix( lw, n_out ,mxREAL);
//Get a pointer to the data space in our newly allocated memory
double* outArray = (double *) malloc (sizeof (double) * (lw * n_out));
//Copy matrix while multiplying each point by 2
int ii;
for( ii=(*f1v1_alignments).pos-1;ii>=0;ii--){ /* hack to do
backward loop */
i=ii;
struct point_alignment temp_alignment = f1v1_alignments->array[i];
outArray[ii*lw+0] = temp_alignment.x1;
outArray[ii*lw+1] = temp_alignment.y1;
outArray[ii*lw+2] = temp_alignment.x2;
outArray[ii*lw+3] = temp_alignment.y2;
outArray[ii*lw+4] = temp_alignment.width;
outArray[ii*lw+5] = temp_alignment.nfa;
}
/*--------------------------- end of MATLAB -------------------------------*/
/* free memory */
if(best_align.l != NULL) free_ntuple_list(best_align.l);
free_alignment_list(all_alignments);
free_alignment_list(f1v1_alignments);
free(cells);
free_ntuple_list(points);
*output_points = outArray;
*X_out = 6;
*N_out = n_out;
/*
free( (void*) cells );
free_ntuple_list(points);
*/
}
/*----------------------------------------------------------------------------*/
|
GB_unop__identity_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_uint16)
// op(A') function: GB (_unop_tran__identity_fp64_uint16)
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator with typecast: Cx [p] = (double) Ax [p].
// Cx and Ax may be aliased.  Ab is A->b when A is stored as a bitmap, in
// which case only entries with Ab [p] != 0 are present; otherwise every one
// of the anz entries is computed.
GrB_Info GB (_unop_apply__identity_fp64_uint16)
(
    double *Cx,                 // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if (Ab == NULL)
    {
        // sparse/dense case: all anz entries are present
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (double) Ax [p] ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = (double) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The transpose loop itself lives in the shared template GB_unop_transpose.c,
// which is textually included below and expands via the GB_* macros defined
// above (GB_GETA, GB_CAST, GB_CX, ...).
GrB_Info GB (_unop_tran__identity_fp64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // NOTE(review): consumed by the template; presumably one workspace per task -- confirm in GB_unop_transpose.c
    const int64_t *restrict A_slice, // NOTE(review): partitioning of A across threads -- confirm in GB_unop_transpose.c
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/image.c"
#else
/* Min/max helpers used by the image kernels below. */
#undef MAX
#define MAX(a,b) ( ((a)>(b)) ? (a) : (b) )
#undef MIN
#define MIN(a,b) ( ((a)<(b)) ? (a) : (b) )
/* NOTE(review): TAPI is defined unconditionally as dllimport -- presumably
   only meaningful for Windows builds; confirm against the build system. */
#undef TAPI
#define TAPI __declspec(dllimport)
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/* temp_t: intermediate precision used while interpolating -- the tensor's
   own `real` type for float/double tensors, otherwise float (e.g. byte). */
#undef temp_t
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define temp_t real
#else
#define temp_t float
#endif
/* Convert an intermediate-precision value back to the tensor's `real` type.
 * For byte tensors this rounds to nearest and saturates into [0, 255];
 * for all other element types the value passes through unchanged. */
static inline real image_(FromIntermediate)(temp_t x) {
#ifdef TH_REAL_IS_BYTE
  const temp_t rounded = x + 0.5;
  if (rounded <= 0)
    return 0;
  if (rounded >= 255)
    return 255;
  return rounded;
#else
  return x;
#endif
}
/* Shared argument validation for the geometric ops below.
 * src and dst must both be 2-D (greyscale) or both be 3-D (channels in
 * dim 0), and when 3-D their channel counts must agree.
 * Raises a Lua error (longjmp, does not return) on mismatch.
 */
static void image_(Main_op_validate)( lua_State *L, THTensor *Tsrc, THTensor *Tdst){
  long src_depth = 1;
  long dst_depth = 1;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
  if(Tdst->nDimension == 3) dst_depth = Tdst->size[0];
  if(Tsrc->nDimension == 3) src_depth = Tsrc->size[0];
  /* One check suffices: the original repeated the depth comparison a second
   * time, but luaL_error never returns, so that branch was dead code. */
  if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) ||
      (Tdst->nDimension!=Tsrc->nDimension) )
    luaL_error(L, "image.scale: src and dst depths do not match");
}
/* Stride of logical dimension i (0 = channel, 1 = row, 2 = column).
 * A 2-D tensor has no channel dimension: its channel stride is reported as
 * 0 and the remaining dimensions are shifted down by one. */
static long image_(Main_op_stride)( THTensor *T,int i){
  if (T->nDimension != 2)
    return T->stride[i];
  return (i == 0) ? 0 : T->stride[i-1];
}
/* Number of channels: size of dim 0 for a 3-D tensor (rgb or rgba),
 * 1 for a 2-D greyscale tensor. */
static long image_(Main_op_depth)( THTensor *T){
  return (T->nDimension == 3) ? T->size[0] : 1;
}
/* Resample one row or column of Tsrc into Tdst with linear interpolation.
 * src_start/dst_start are flat offsets of the first element,
 * src_stride/dst_stride the step between consecutive elements, and
 * src_len/dst_len the element counts.
 *
 * Three regimes:
 *  - upscaling (dst_len > src_len): linear blend of the two neighbouring
 *    source samples; the final destination sample is copied from the final
 *    source sample so the endpoints match exactly.
 *  - downscaling (dst_len < src_len): box-filter averaging -- each output
 *    accumulates the fractionally-weighted source samples it covers and
 *    divides by the total weight.
 *  - equal lengths: plain copy.
 */
static void image_(Main_scaleLinear_rowcol)(THTensor *Tsrc,
                                            THTensor *Tdst,
                                            long src_start,
                                            long dst_start,
                                            long src_stride,
                                            long dst_stride,
                                            long src_len,
                                            long dst_len ) {
  real *src= THTensor_(data)(Tsrc);
  real *dst= THTensor_(data)(Tdst);
  if ( dst_len > src_len ){
    long di;
    float si_f;
    long si_i;
    float scale = (float)(src_len - 1) / (dst_len - 1);
    if ( src_len == 1 ) {
      /* degenerate source: replicate its single sample */
      for( di = 0; di < dst_len - 1; di++ ) {
        long dst_pos = dst_start + di*dst_stride;
        dst[dst_pos] = src[ src_start ];
      }
    } else {
      for( di = 0; di < dst_len - 1; di++ ) {
        long dst_pos = dst_start + di*dst_stride;
        /* si_i = integer source index, si_f = fractional remainder */
        si_f = di * scale; si_i = (long)si_f; si_f -= si_i;
        dst[dst_pos] = image_(FromIntermediate)(
            (1 - si_f) * src[ src_start + si_i * src_stride ] +
            si_f * src[ src_start + (si_i + 1) * src_stride ]);
      }
    }
    /* pin the last output to the last input */
    dst[ dst_start + (dst_len - 1) * dst_stride ] =
        src[ src_start + (src_len - 1) * src_stride ];
  }
  else if ( dst_len < src_len ) {
    long di;
    long si0_i = 0; float si0_f = 0;  /* left edge of the current window */
    long si1_i; float si1_f;          /* right edge of the current window */
    long si;
    float scale = (float)src_len / dst_len;
    float acc, n;                     /* weighted sum and total weight */
    for( di = 0; di < dst_len; di++ )
    {
      si1_f = (di + 1) * scale; si1_i = (long)si1_f; si1_f -= si1_i;
      /* partial contribution of the left boundary sample */
      acc = (1 - si0_f) * src[ src_start + si0_i * src_stride ];
      n = 1 - si0_f;
      /* full contribution of interior samples */
      for( si = si0_i + 1; si < si1_i; si++ )
      {
        acc += src[ src_start + si * src_stride ];
        n += 1;
      }
      /* partial contribution of the right boundary sample */
      if( si1_i < src_len )
      {
        acc += si1_f * src[ src_start + si1_i*src_stride ];
        n += si1_f;
      }
      dst[ dst_start + di*dst_stride ] = image_(FromIntermediate)(acc / n);
      si0_i = si1_i; si0_f = si1_f;   /* slide the window forward */
    }
  }
  else {
    /* equal lengths: straight copy */
    long i;
    for( i = 0; i < dst_len; i++ )
      dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ];
  }
}
/* Cubic interpolation through four consecutive samples p0..p3, evaluated at
 * fractional position x in [0,1] between p1 and p2 (Horner form).  The
 * arithmetic is kept exactly as in the original so results are identical. */
static inline temp_t image_(Main_cubicInterpolate)(temp_t p0, temp_t p1,
                                                   temp_t p2, temp_t p3,
                                                   temp_t x) {
  const temp_t c0 = p1;
  const temp_t c1 = p2 - p0;
  const temp_t c2 = 2 * p0 - 5 * p1 + 4 * p2 - p3;
  const temp_t c3 = 3 * (p1 - p2) + p3 - p0;
  return c0 + 0.5 * x * (c1 + x * (c2 + x * c3));
}
/* Resample one row or column of Tsrc into Tdst with cubic interpolation.
 * Same offset/stride/length conventions as Main_scaleLinear_rowcol.
 * Equal lengths degrade to a copy; a single-sample source is replicated.
 * Otherwise each output (except the last, which is pinned to the last
 * source sample) is a cubic through the four samples around its source
 * position, with the missing sample at either boundary extrapolated
 * linearly (2*p1 - p2, resp. 2*p2 - p1).
 */
static void image_(Main_scaleCubic_rowcol)(THTensor *Tsrc,
                                           THTensor *Tdst,
                                           long src_start,
                                           long dst_start,
                                           long src_stride,
                                           long dst_stride,
                                           long src_len,
                                           long dst_len ) {
  real *src= THTensor_(data)(Tsrc);
  real *dst= THTensor_(data)(Tdst);
  if ( dst_len == src_len ){
    /* equal lengths: straight copy */
    long i;
    for( i = 0; i < dst_len; i++ )
      dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ];
  } else if ( src_len == 1 ) {
    /* degenerate source: replicate its single sample */
    long i;
    for( i = 0; i < dst_len - 1; i++ ) {
      long dst_pos = dst_start + i*dst_stride;
      dst[dst_pos] = src[ src_start ];
    }
  } else {
    long di;
    float si_f;
    long si_i;
    float scale;
    if (dst_len == 1)
      scale = (float)(src_len - 1);
    else
      scale = (float)(src_len - 1) / (dst_len - 1);
    for( di = 0; di < dst_len - 1; di++ ) {
      long dst_pos = dst_start + di*dst_stride;
      /* si_i = integer source index, si_f = fractional remainder */
      si_f = di * scale; si_i = (long)si_f; si_f -= si_i;
      temp_t p0;
      temp_t p1 = src[ src_start + si_i * src_stride ];
      temp_t p2 = src[ src_start + (si_i + 1) * src_stride ];
      temp_t p3;
      if (si_i > 0) {
        p0 = src[ src_start + (si_i - 1) * src_stride ];
      } else {
        /* left boundary: extrapolate the missing sample */
        p0 = 2 * p1 - p2;
      }
      if (si_i + 2 < src_len) {
        p3 = src[ src_start + (si_i + 2) * src_stride ];
      } else {
        /* right boundary: extrapolate the missing sample */
        p3 = 2 * p2 - p1;
      }
      temp_t value = image_(Main_cubicInterpolate)(p0, p1, p2, p3, si_f);
      dst[dst_pos] = image_(FromIntermediate)(value);
    }
    /* pin the last output to the last input */
    dst[ dst_start + (dst_len - 1) * dst_stride ] =
        src[ src_start + (src_len - 1) * src_stride ];
  }
}
/* Lua binding: bilinear rescale of tensor arg 1 (src) into tensor arg 2
 * (dst).  Separable two-pass scheme: each source row is first resampled
 * into a temporary (src_height x dst_width) tensor, then each temporary
 * column is resampled into dst.  Handles 2-D and 3-D tensors (channels in
 * dim 0), one channel at a time.  Pushes no Lua results (returns 0).
 */
static int image_(Main_scaleBilinear)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *Ttmp;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height;
  long src_stride0, src_stride1, src_stride2, src_width, src_height;
  long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height;
  long i, j, k;
  image_(Main_op_validate)(L, Tsrc,Tdst);
  int ndims;
  if (Tdst->nDimension == 3) ndims = 3;
  else ndims = 2;
  /* intermediate buffer: source height by destination width */
  Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]);
  dst_stride0= image_(Main_op_stride)(Tdst,0);
  dst_stride1= image_(Main_op_stride)(Tdst,1);
  dst_stride2= image_(Main_op_stride)(Tdst,2);
  src_stride0= image_(Main_op_stride)(Tsrc,0);
  src_stride1= image_(Main_op_stride)(Tsrc,1);
  src_stride2= image_(Main_op_stride)(Tsrc,2);
  tmp_stride0= image_(Main_op_stride)(Ttmp,0);
  tmp_stride1= image_(Main_op_stride)(Ttmp,1);
  tmp_stride2= image_(Main_op_stride)(Ttmp,2);
  dst_width= Tdst->size[ndims-1];
  dst_height= Tdst->size[ndims-2];
  src_width= Tsrc->size[ndims-1];
  src_height= Tsrc->size[ndims-2];
  tmp_width= Ttmp->size[1];
  tmp_height= Ttmp->size[0];
  for(k=0;k<image_(Main_op_depth)(Tsrc);k++) {
    /* compress/expand rows first */
    for(j = 0; j < src_height; j++) {
      image_(Main_scaleLinear_rowcol)(Tsrc,
                                      Ttmp,
                                      0*src_stride2+j*src_stride1+k*src_stride0,
                                      0*tmp_stride2+j*tmp_stride1+k*tmp_stride0,
                                      src_stride2,
                                      tmp_stride2,
                                      src_width,
                                      tmp_width );
    }
    /* then columns */
    for(i = 0; i < dst_width; i++) {
      image_(Main_scaleLinear_rowcol)(Ttmp,
                                      Tdst,
                                      i*tmp_stride2+0*tmp_stride1+k*tmp_stride0,
                                      i*dst_stride2+0*dst_stride1+k*dst_stride0,
                                      tmp_stride1,
                                      dst_stride1,
                                      tmp_height,
                                      dst_height );
    }
  }
  THTensor_(free)(Ttmp);
  return 0;
}
/* Lua binding: bicubic rescale of tensor arg 1 (src) into tensor arg 2
 * (dst).  Identical structure to Main_scaleBilinear but delegates each
 * 1-D pass to Main_scaleCubic_rowcol: rows into a temporary
 * (src_height x dst_width) tensor, then columns into dst, per channel.
 * Pushes no Lua results (returns 0).
 */
static int image_(Main_scaleBicubic)(lua_State *L) {
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *Ttmp;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height;
  long src_stride0, src_stride1, src_stride2, src_width, src_height;
  long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height;
  long i, j, k;
  image_(Main_op_validate)(L, Tsrc,Tdst);
  int ndims;
  if (Tdst->nDimension == 3) ndims = 3;
  else ndims = 2;
  /* intermediate buffer: source height by destination width */
  Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]);
  dst_stride0= image_(Main_op_stride)(Tdst,0);
  dst_stride1= image_(Main_op_stride)(Tdst,1);
  dst_stride2= image_(Main_op_stride)(Tdst,2);
  src_stride0= image_(Main_op_stride)(Tsrc,0);
  src_stride1= image_(Main_op_stride)(Tsrc,1);
  src_stride2= image_(Main_op_stride)(Tsrc,2);
  tmp_stride0= image_(Main_op_stride)(Ttmp,0);
  tmp_stride1= image_(Main_op_stride)(Ttmp,1);
  tmp_stride2= image_(Main_op_stride)(Ttmp,2);
  dst_width= Tdst->size[ndims-1];
  dst_height= Tdst->size[ndims-2];
  src_width= Tsrc->size[ndims-1];
  src_height= Tsrc->size[ndims-2];
  tmp_width= Ttmp->size[1];
  tmp_height= Ttmp->size[0];
  for(k=0;k<image_(Main_op_depth)(Tsrc);k++) {
    /* compress/expand rows first */
    for(j = 0; j < src_height; j++) {
      image_(Main_scaleCubic_rowcol)(Tsrc,
                                     Ttmp,
                                     0*src_stride2+j*src_stride1+k*src_stride0,
                                     0*tmp_stride2+j*tmp_stride1+k*tmp_stride0,
                                     src_stride2,
                                     tmp_stride2,
                                     src_width,
                                     tmp_width );
    }
    /* then columns */
    for(i = 0; i < dst_width; i++) {
      image_(Main_scaleCubic_rowcol)(Ttmp,
                                     Tdst,
                                     i*tmp_stride2+0*tmp_stride1+k*tmp_stride0,
                                     i*dst_stride2+0*dst_stride1+k*dst_stride0,
                                     tmp_stride1,
                                     dst_stride1,
                                     tmp_height,
                                     dst_height );
    }
  }
  THTensor_(free)(Ttmp);
  return 0;
}
/* Lua binding: nearest-neighbour rescale of tensor arg 1 (src) into tensor
 * arg 2 (dst).  Each destination pixel copies the source pixel at the
 * proportionally scaled coordinate -- no interpolation.  src and dst must
 * both be 2-D, or both 3-D with matching channel counts (else a Lua error
 * is raised).  Pushes no Lua results (returns 0).
 */
static int image_(Main_scaleSimple)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float scx, scy;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "image.scale: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "image.scale: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  /* One check suffices: the original repeated the depth comparison below,
   * but luaL_error never returns, so that second branch was dead code. */
  if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) ||
      (Tdst->nDimension!=Tsrc->nDimension) ) {
    printf("image.scale:%d,%d,%ld,%ld\n",Tsrc->nDimension,Tdst->nDimension,src_depth,dst_depth);
    luaL_error(L, "image.scale: src and dst depths do not match");
  }
  /* source pixels per destination pixel along each axis */
  scx=((float)src_width)/((float)dst_width);
  scy=((float)src_height)/((float)dst_height);
#pragma omp parallel for private(j, i, k)
  for(j = 0; j < dst_height; j++) {
    for(i = 0; i < dst_width; i++) {
      float val = 0.0;
      long ii=(long) (((float)i)*scx);  /* nearest source column */
      long jj=(long) (((float)j)*scy);  /* nearest source row */
      if(ii>src_width-1) ii=src_width-1;
      if(jj>src_height-1) jj=src_height-1;
      if(Tsrc->nDimension==2)
      {
        val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      }
      else
      {
        for(k=0;k<src_depth;k++)
        {
          val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* Lua binding: rotate tensor arg 1 (src) by `theta` radians (arg 3) into
 * tensor arg 2 (dst) using nearest-neighbour sampling (inverse mapping with
 * round()).  Rotation is about the source image centre; destination pixels
 * whose pre-image falls outside src are set to 0.  In-place operation is
 * rejected.  Pushes no Lua results (returns 0).
 */
static int image_(Main_rotate)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float theta = luaL_checknumber(L, 3);
  float cos_theta, sin_theta;
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float xc, yc;       /* rotation centre (source image centre) */
  float id,jd;
  long ii,jj;         /* sampled source coordinates */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  if (dst == src) {
    luaL_error(L, "image.rotate: in-place rotate not supported");
  }
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.rotate: src and dst depths do not match");
  if( (Tsrc->nDimension!=Tdst->nDimension) )
    luaL_error(L, "image.rotate: src and dst depths do not match");
  xc = (src_width-1)/2.0;
  yc = (src_height-1)/2.0;
  /* hoisted loop-invariant trig */
  sin_theta = sin(theta);
  cos_theta = cos(theta);
  for(j = 0; j < dst_height; j++) {
    jd=j;
    for(i = 0; i < dst_width; i++) {
      float val = -1;   /* -1 = "not yet decided"; 0 = out of bounds */
      id= i;
      /* inverse rotation: where does dst pixel (i,j) come from in src? */
      ii = (long) round(cos_theta*(id-xc) - sin_theta*(jd-yc) + xc);
      jj = (long) round(cos_theta*(jd-yc) + sin_theta*(id-xc) + yc);
      /* rotated corners are blank */
      if(ii>src_width-1) val=0;
      if(jj>src_height-1) val=0;
      if(ii<0) val=0;
      if(jj<0) val=0;
      if(Tsrc->nDimension==2)
      {
        if(val==-1)
          val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      }
      else
      {
        /* out-of-bounds pixels keep val=0 across all channels */
        int do_copy=0; if(val==-1) do_copy=1;
        for(k=0;k<src_depth;k++)
        {
          if(do_copy)
            val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* Lua binding: rotate tensor arg 1 (src) by `theta` radians (arg 3) into
 * tensor arg 2 (dst) with bilinear interpolation (inverse mapping).
 * Rotation is about the source image centre; destination pixels whose
 * pre-image falls outside src are set to 0, with a nearest-sample fallback
 * right on the boundary.  In-place operation is rejected.  Pushes no Lua
 * results (returns 0).
 *
 * Fix vs. original: cos(theta)/sin(theta) were recomputed four times per
 * destination pixel although they are loop-invariant; they are now hoisted
 * out of the loops (as Main_rotate already does).  They are kept in double
 * so every interpolated value is bit-identical to the original code.
 */
static int image_(Main_rotateBilinear)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float theta = luaL_checknumber(L, 3);
  double cos_theta, sin_theta;
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float xc, yc;       /* rotation centre (source image centre) */
  float id,jd;
  long ii_0, ii_1, jj_0, jj_1;   /* the 2x2 source neighbourhood */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  if (dst == src) {
    luaL_error(L, "image.rotate: in-place rotate not supported");
  }
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.rotate: src and dst depths do not match");
  if( (Tsrc->nDimension!=Tdst->nDimension) )
    luaL_error(L, "image.rotate: src and dst depths do not match");
  xc = (src_width-1)/2.0;
  yc = (src_height-1)/2.0;
  cos_theta = cos(theta);
  sin_theta = sin(theta);
  for(j = 0; j < dst_height; j++) {
    jd=j;
    for(i = 0; i < dst_width; i++) {
      float val = -1;   /* -1 = "not yet decided"; 0 = out of bounds */
      temp_t ri, rj, wi, wj;
      id= i;
      /* inverse rotation: continuous source coords for dst pixel (i,j) */
      ri = cos_theta*(id-xc)-sin_theta*(jd-yc);
      rj = cos_theta*(jd-yc)+sin_theta*(id-xc);
      ii_0 = (long)floor(ri+xc);
      ii_1 = ii_0 + 1;
      jj_0 = (long)floor(rj+yc);
      jj_1 = jj_0 + 1;
      wi = ri+xc-ii_0;  /* horizontal interpolation weight */
      wj = rj+yc-jj_0;  /* vertical interpolation weight */
      /* default to the closest value when interpolating on image boundaries (either image pixel or 0) */
      if(ii_1==src_width && wi<0.5) ii_1 = ii_0;
      else if(ii_1>=src_width) val=0;
      if(jj_1==src_height && wj<0.5) jj_1 = jj_0;
      else if(jj_1>=src_height) val=0;
      if(ii_0==-1 && wi>0.5) ii_0 = ii_1;
      else if(ii_0<0) val=0;
      if(jj_0==-1 && wj>0.5) jj_0 = jj_1;
      else if(jj_0<0) val=0;
      if(Tsrc->nDimension==2) {
        if(val==-1)
          val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
              + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
              + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
              + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      } else {
        /* out-of-bounds pixels keep val=0 across all channels */
        int do_copy=0; if(val==-1) do_copy=1;
        for(k=0;k<src_depth;k++) {
          if(do_copy) {
            val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
                + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
                + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
                + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
          }
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* Lua binding: cartesian-to-polar transform of tensor arg 1 (src) into
 * tensor arg 2 (dst) with nearest-neighbour sampling.  dst rows sweep the
 * angle [0, 2*pi), dst columns sweep the radius [0, m).  When arg 3
 * (doFull) == 1 the radius extends to the image diagonal (corners
 * included), otherwise to the shorter half-dimension.  Out-of-bounds
 * samples become 0.  Pushes no Lua results (returns 0).
 */
static int image_(Main_polar)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX;
  long ii,jj;          /* sampled source coordinates */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  }
  else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  for(j = 0; j < dst_height; j++) { // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height; // current angle
    for(i = 0; i < dst_width; i++) { // radius loop
      float val = -1;   /* -1 = "not yet decided"; 0 = out of bounds */
      id = (float) i;
      r = (m * id) / (float) dst_width; // current distance
      jj = (long) floor( r * cos(a) + midY); // y-location in source image
      ii = (long) floor(-r * sin(a) + midX); // x-location in source image
      if(ii>src_width-1) val=0;
      if(jj>src_height-1) val=0;
      if(ii<0) val=0;
      if(jj<0) val=0;
      if(Tsrc->nDimension==2)
      {
        if(val==-1)
          val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      }
      else
      {
        /* out-of-bounds pixels keep val=0 across all channels */
        int do_copy=0; if(val==-1) do_copy=1;
        for(k=0;k<src_depth;k++)
        {
          if(do_copy)
            val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* Lua binding: cartesian-to-polar transform of tensor arg 1 (src) into
 * tensor arg 2 (dst) with bilinear interpolation.  Same geometry as
 * Main_polar (rows = angle, columns = radius; arg 3 doFull==1 extends the
 * radius to the diagonal).  Where the 2x2 neighbourhood would leave the
 * image, it falls back to nearest-neighbour (or 0 when fully outside).
 * Pushes no Lua results (returns 0).
 */
static int image_(Main_polarBilinear)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX;
  long ii_0, ii_1, jj_0, jj_1;   /* the 2x2 source neighbourhood */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  }
  else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  for(j = 0; j < dst_height; j++) { // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height; // current angle
    for(i = 0; i < dst_width; i++) { // radius loop
      float val = -1;   /* -1 = "not yet decided"; 0 = out of bounds */
      temp_t ri, rj, wi, wj;
      id = (float) i;
      r = (m * id) / (float) dst_width; // current distance
      rj = r * cos(a) + midY; // y-location in source image
      ri = -r * sin(a) + midX; // x-location in source image
      ii_0=(long)floor(ri);
      ii_1=ii_0 + 1;
      jj_0=(long)floor(rj);
      jj_1=jj_0 + 1;
      wi = ri - ii_0;   /* horizontal interpolation weight */
      wj = rj - jj_0;   /* vertical interpolation weight */
      // switch to nearest interpolation when bilinear is impossible
      if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
        if(ii_0>src_width-1) val=0;
        if(jj_0>src_height-1) val=0;
        if(ii_0<0) val=0;
        if(jj_0<0) val=0;
        if(Tsrc->nDimension==2)
        {
          if(val==-1)
            val=src[ii_0*src_stride2+jj_0*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        }
        else
        {
          /* fully-outside pixels keep val=0 across all channels */
          int do_copy=0; if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++)
          {
            if(do_copy)
              val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
      // bilinear interpolation
      else {
        if(Tsrc->nDimension==2) {
          if(val==-1)
            val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
                + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
                + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
                + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0; if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) {
              val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
                  + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
                  + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
            }
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
    }
  }
  return 0;
}
/* Lua binding: cartesian-to-log-polar transform of tensor arg 1 (src) into
 * tensor arg 2 (dst) with nearest-neighbour sampling.  Like Main_polar but
 * the radius grows exponentially with the column index (r = exp(i * fw),
 * fw = log(m)/dst_width), so dst columns sample radii on a log scale up to
 * m.  Arg 3 (doFull) == 1 extends m to the image diagonal.  Out-of-bounds
 * samples become 0.  Pushes no Lua results (returns 0).
 */
static int image_(Main_logPolar)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX, fw;
  long ii,jj;          /* sampled source coordinates */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src= THTensor_(data)(Tsrc);
  dst= THTensor_(data)(Tdst);
  /* strides/sizes; a 2-D tensor is treated as depth 0 with channel stride 0 */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  }
  else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  fw = log(m) / (float) dst_width;   /* log-radius step per column */
  for(j = 0; j < dst_height; j++) { // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height; // current angle
    for(i = 0; i < dst_width; i++) { // radius loop
      float val = -1;   /* -1 = "not yet decided"; 0 = out of bounds */
      id = (float) i;
      r = exp(id * fw);
      jj = (long) floor( r * cos(a) + midY); // y-location in source image
      ii = (long) floor(-r * sin(a) + midX); // x-location in source image
      if(ii>src_width-1) val=0;
      if(jj>src_height-1) val=0;
      if(ii<0) val=0;
      if(jj<0) val=0;
      if(Tsrc->nDimension==2)
      {
        if(val==-1)
          val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      }
      else
      {
        /* out-of-bounds pixels keep val=0 across all channels */
        int do_copy=0; if(val==-1) do_copy=1;
        for(k=0;k<src_depth;k++)
        {
          if(do_copy)
            val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* image.logpolar (bilinear variant): fill dst, indexed by (angle row,
 * log-radius column), by bilinearly sampling src at the corresponding
 * cartesian location.  Lua args: 1 src tensor (2D or 3D), 2 dst tensor,
 * 3 doFull flag (1 => radius spans the half-diagonal of src).
 * Pushes no Lua results. */
static int image_(Main_logPolarBilinear)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  float doFull = luaL_checknumber(L, 3);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  float id, jd, a, r, m, midY, midX, fw;
  long ii_0, ii_1, jj_0, jj_1;
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
  src = THTensor_(data)(Tsrc);
  dst = THTensor_(data)(Tdst);
  /* strides/sizes; the depth (channel) dimension exists only for 3D tensors */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
    luaL_error(L, "image.polar: src and dst depths do not match"); }
  if( (Tsrc->nDimension!=Tdst->nDimension) ) {
    /* fixed message: this check compares dimensionality, not depth */
    luaL_error(L, "image.polar: src and dst dimensions do not match"); }
  // compute maximum distance
  midY = (float) src_height / 2.0;
  midX = (float) src_width / 2.0;
  if(doFull == 1) {
    /* half-diagonal: the whole image fits in the radius range */
    m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
  }
  else {
    m = (src_width < src_height) ? midX : midY;
  }
  // loop to fill polar image
  fw = log(m) / (float) dst_width;  /* log-radius step per dst column */
  for(j = 0; j < dst_height; j++) {  // orientation loop
    jd = (float) j;
    a = (2 * M_PI * jd) / (float) dst_height;  // current angle
    for(i = 0; i < dst_width; i++) {  // radius loop
      float val = -1;  /* -1 = "not yet resolved"; forced to 0 out of bounds */
      float ri, rj, wi, wj;
      id = (float) i;
      r = exp(id * fw);
      rj = r * cos(a) + midY;   // y-location in source image
      ri = -r * sin(a) + midX;  // x-location in source image
      ii_0=(long)floor(ri);
      ii_1=ii_0 + 1;
      jj_0=(long)floor(rj);
      jj_1=jj_0 + 1;
      wi = ri - ii_0;  /* fractional bilinear weights */
      wj = rj - jj_0;
      // switch to nearest interpolation when bilinear is impossible
      if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
        if(ii_0>src_width-1) val=0;
        if(jj_0>src_height-1) val=0;
        if(ii_0<0) val=0;
        if(jj_0<0) val=0;
        if(Tsrc->nDimension==2)
        {
          if(val==-1)
            val=src[ii_0*src_stride2+jj_0*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        }
        else
        {
          int do_copy=0; if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++)
          {
            if(do_copy)
              val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
      // bilinear interpolation
      else {
        if(Tsrc->nDimension==2) {
          if(val==-1)
            val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
              + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
              + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
              + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
          dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
        } else {
          int do_copy=0; if(val==-1) do_copy=1;
          for(k=0;k<src_depth;k++) {
            if(do_copy) {
              val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
                + wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
                + (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
                + wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
            }
            dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
          }
        }
      }
    }
  }
  return 0;
}
/* image.crop (no rescale): copy a dst-sized window of src, whose top-left
 * corner is at (startx, starty), into dst.
 * Lua args: 1 src tensor (2D or 3D), 2 dst tensor, 3 startx, 4 starty.
 * Pushes no Lua results. */
static int image_(Main_cropNoScale)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  long startx = luaL_checklong(L, 3);
  long starty = luaL_checklong(L, 4);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  /* messages fixed: they previously said "rotate:" (copy-paste slip) */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "crop: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "crop: dst not 2 or 3 dimensional");
  src = THTensor_(data)(Tsrc);
  dst = THTensor_(data)(Tdst);
  /* strides/sizes; the depth (channel) dimension exists only for 3D tensors */
  dst_stride0 = 0;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 0;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 0;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 0;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( startx<0 || starty<0 || (startx+dst_width>src_width) || (starty+dst_height>src_height))
    luaL_error(L, "image.crop: crop goes outside bounds of src");
  if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.crop: src and dst depths do not match");
  /* straight copy of the window */
  for(j = 0; j < dst_height; j++) {
    for(i = 0; i < dst_width; i++) {
      float val = 0.0;
      long ii=i+startx;
      long jj=j+starty;
      if(Tsrc->nDimension==2)
      {
        val=src[ii*src_stride2+jj*src_stride1];
        dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
      }
      else
      {
        for(k=0;k<src_depth;k++)
        {
          val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
          dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
        }
      }
    }
  }
  return 0;
}
/* image.translate: copy src into dst shifted by (shiftx, shifty); source
 * pixels that land outside dst are dropped (cropped).
 * Lua args: 1 src tensor (2D or 3D), 2 dst tensor, 3 shiftx, 4 shifty.
 * Pushes no Lua results. */
static int image_(Main_translate)(lua_State *L)
{
  THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
  long shiftx = luaL_checklong(L, 3);
  long shifty = luaL_checklong(L, 4);
  real *src, *dst;
  long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
  long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
  long i, j, k;
  /* messages fixed: they previously said "rotate:" (copy-paste slip) */
  luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "translate: src not 2 or 3 dimensional");
  luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "translate: dst not 2 or 3 dimensional");
  src = THTensor_(data)(Tsrc);
  dst = THTensor_(data)(Tdst);
  /* 2D tensors are treated as depth-1 images (depth = 1, stride0 unused
   * since k stays 0), so a single copy loop handles both cases below */
  dst_stride0 = 1;
  dst_stride1 = Tdst->stride[Tdst->nDimension-2];
  dst_stride2 = Tdst->stride[Tdst->nDimension-1];
  dst_depth = 1;
  dst_height = Tdst->size[Tdst->nDimension-2];
  dst_width = Tdst->size[Tdst->nDimension-1];
  if(Tdst->nDimension == 3) {
    dst_stride0 = Tdst->stride[0];
    dst_depth = Tdst->size[0];
  }
  src_stride0 = 1;
  src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
  src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
  src_depth = 1;
  src_height = Tsrc->size[Tsrc->nDimension-2];
  src_width = Tsrc->size[Tsrc->nDimension-1];
  if(Tsrc->nDimension == 3) {
    src_stride0 = Tsrc->stride[0];
    src_depth = Tsrc->size[0];
  }
  if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
    luaL_error(L, "image.translate: src and dst depths do not match");
  for(j = 0; j < src_height; j++) {
    for(i = 0; i < src_width; i++) {
      long ii=i+shiftx;
      long jj=j+shifty;
      // Check it's within destination bounds, else crop
      if(ii<dst_width && jj<dst_height && ii>=0 && jj>=0) {
        for(k=0;k<src_depth;k++) {
          dst[ii*dst_stride2+jj*dst_stride1+k*dst_stride0] = src[i*src_stride2+j*src_stride1+k*src_stride0];
        }
      }
    }
  }
  return 0;
}
/* image.saturate: clamp every element of the input tensor to [0, 1],
 * in place.  For byte tensors this is a no-op (values already live in
 * [0, 255]).  Returns the input tensor itself (hence `return 1`). */
static int image_(Main_saturate)(lua_State *L) {
#ifdef TH_REAL_IS_BYTE
  // Noop since necessarily constrained to [0, 255].
#else
  THTensor *input = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *output = input;  /* in-place: output aliases input */
  TH_TENSOR_APPLY2(real, output, real, input, \
                   *output_data = (*input_data < 0) ? 0 : (*input_data > 1) ? 1 : *input_data;)
#endif
  return 1;  /* the argument at stack slot 1 is returned to Lua */
}
/*
* Converts an RGB color value to HSL. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSL_color_space.
* Assumes r, g, and b are contained in the set [0, 1] and
* returns h, s, and l in the set [0, 1].
*/
/*
 * image.rgb2hsl: per-pixel RGB -> HSL conversion of a 3xHxW tensor.
 * Channels move through get3d/set3d; byte tensors are rescaled through
 * the [0, 255] range on the way in and out.
 */
int image_(Main_rgb2hsl)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *hsl = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, l;
  for (y = 0; y < rgb->size[1]; y++) {
    for (x = 0; x < rgb->size[2]; x++) {
      /* fetch one RGB pixel */
      r = THTensor_(get3d)(rgb, 0, y, x);
      g = THTensor_(get3d)(rgb, 1, y, x);
      b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      /* normalize bytes to [0, 1] */
      r /= 255;
      g /= 255;
      b /= 255;
#endif
      temp_t cmax = max(max(r, g), b);
      temp_t cmin = min(min(r, g), b);
      if (cmax == cmin) {
        /* grey pixel: hue and saturation are undefined, use 0 */
        h = 0;
        s = 0;
        l = cmax;
      } else {
        temp_t delta = cmax - cmin;
        /* hue sextant depends on which channel dominates */
        if (cmax == r) {
          h = (g - b) / delta + (g < b ? 6 : 0);
        } else if (cmax == g) {
          h = (b - r) / delta + 2;
        } else {
          h = (r - g) / delta + 4;
        }
        h /= 6;
        l = (cmax + cmin) / 2;
        s = l > 0.5 ? delta / (2 - cmax - cmin) : delta / (cmax + cmin);
      }
#ifdef TH_REAL_IS_BYTE
      /* back to byte range */
      h *= 255;
      s *= 255;
      l *= 255;
#endif
      THTensor_(set3d)(hsl, 0, y, x, image_(FromIntermediate)(h));
      THTensor_(set3d)(hsl, 1, y, x, image_(FromIntermediate)(s));
      THTensor_(set3d)(hsl, 2, y, x, image_(FromIntermediate)(l));
    }
  }
  return 0;
}
// helper
static inline temp_t image_(hue2rgb)(temp_t p, temp_t q, temp_t t) {
if (t < 0.) t += 1;
if (t > 1.) t -= 1;
if (t < 1./6)
return p + (q - p) * 6. * t;
else if (t < 1./2)
return q;
else if (t < 2./3)
return p + (q - p) * (2./3 - t) * 6.;
else
return p;
}
/*
* Converts an HSL color value to RGB. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSL_color_space.
* Assumes h, s, and l are contained in the set [0, 1] and
* returns r, g, and b in the set [0, 1].
*/
/*
 * image.hsl2rgb: per-pixel HSL -> RGB conversion of a 3xHxW tensor.
 * Inverse of rgb2hsl; byte tensors are rescaled through [0, 255].
 */
int image_(Main_hsl2rgb)(lua_State *L) {
  THTensor *hsl = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, l;
  for (y = 0; y < hsl->size[1]; y++) {
    for (x = 0; x < hsl->size[2]; x++) {
      /* fetch one HSL pixel */
      h = THTensor_(get3d)(hsl, 0, y, x);
      s = THTensor_(get3d)(hsl, 1, y, x);
      l = THTensor_(get3d)(hsl, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      /* normalize bytes to [0, 1] */
      h /= 255;
      s /= 255;
      l /= 255;
#endif
      if (s == 0) {
        /* achromatic: all channels equal the lightness */
        r = l;
        g = l;
        b = l;
      } else {
        /* t2/t1 bound the channel values; hue offsets pick the ramp */
        temp_t t2 = (l < 0.5) ? (l * (1 + s)) : (l + s - l * s);
        temp_t t1 = 2 * l - t2;
        temp_t hue_r = h + 1./3;
        temp_t hue_g = h;
        temp_t hue_b = h - 1./3;
        r = image_(hue2rgb)(t1, t2, hue_r);
        g = image_(hue2rgb)(t1, t2, hue_g);
        b = image_(hue2rgb)(t1, t2, hue_b);
      }
#ifdef TH_REAL_IS_BYTE
      /* back to byte range */
      r *= 255;
      g *= 255;
      b *= 255;
#endif
      THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
      THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
      THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
    }
  }
  return 0;
}
/*
* Converts an RGB color value to HSV. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSV_color_space.
* Assumes r, g, and b are contained in the set [0, 1] and
* returns h, s, and v in the set [0, 1].
*/
/*
 * image.rgb2hsv: per-pixel RGB -> HSV conversion of a 3xHxW tensor.
 * Byte tensors are rescaled through [0, 255] on the way in and out.
 */
int image_(Main_rgb2hsv)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *hsv = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, v;
  for (y = 0; y < rgb->size[1]; y++) {
    for (x = 0; x < rgb->size[2]; x++) {
      /* fetch one RGB pixel */
      r = THTensor_(get3d)(rgb, 0, y, x);
      g = THTensor_(get3d)(rgb, 1, y, x);
      b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      /* normalize bytes to [0, 1] */
      r /= 255;
      g /= 255;
      b /= 255;
#endif
      temp_t cmax = max(max(r, g), b);
      temp_t cmin = min(min(r, g), b);
      if (cmax == cmin) {
        /* grey pixel: hue and saturation are undefined, use 0 */
        h = 0;
        s = 0;
        v = cmax;
      } else {
        temp_t delta = cmax - cmin;
        /* hue sextant depends on which channel dominates */
        if (cmax == r) {
          h = (g - b) / delta + (g < b ? 6 : 0);
        } else if (cmax == g) {
          h = (b - r) / delta + 2;
        } else {
          h = (r - g) / delta + 4;
        }
        h /= 6;
        s = delta / cmax;
        v = cmax;
      }
#ifdef TH_REAL_IS_BYTE
      /* back to byte range */
      h *= 255;
      s *= 255;
      v *= 255;
#endif
      THTensor_(set3d)(hsv, 0, y, x, image_(FromIntermediate)(h));
      THTensor_(set3d)(hsv, 1, y, x, image_(FromIntermediate)(s));
      THTensor_(set3d)(hsv, 2, y, x, image_(FromIntermediate)(v));
    }
  }
  return 0;
}
/*
* Converts an HSV color value to RGB. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSV_color_space.
* Assumes h, s, and v are contained in the set [0, 1] and
* returns r, g, and b in the set [0, 1].
*/
/*
 * image.hsv2rgb: per-pixel HSV -> RGB conversion of a 3xHxW tensor.
 * Inverse of rgb2hsv; byte tensors are rescaled through [0, 255].
 */
int image_(Main_hsv2rgb)(lua_State *L) {
  THTensor *hsv = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y, x;
  temp_t r, g, b, h, s, v;
  for (y = 0; y < hsv->size[1]; y++) {
    for (x = 0; x < hsv->size[2]; x++) {
      /* fetch one HSV pixel */
      h = THTensor_(get3d)(hsv, 0, y, x);
      s = THTensor_(get3d)(hsv, 1, y, x);
      v = THTensor_(get3d)(hsv, 2, y, x);
#ifdef TH_REAL_IS_BYTE
      /* normalize bytes to [0, 1] */
      h /= 255;
      s /= 255;
      v /= 255;
#endif
      /* split hue into one of six sectors plus a fractional part */
      int sector = floor(h*6.);
      temp_t f = h*6 - sector;
      temp_t p = v*(1 - s);
      temp_t q = v*(1 - f*s);
      temp_t t = v*(1 - (1 - f)*s);
      switch (sector % 6) {
        case 0: r = v; g = t; b = p; break;
        case 1: r = q; g = v; b = p; break;
        case 2: r = p; g = v; b = t; break;
        case 3: r = p; g = q; b = v; break;
        case 4: r = t; g = p; b = v; break;
        case 5: r = v; g = p; b = q; break;
        default: r = 0; g = 0; b = 0; break;  /* defensive; h < 0 only */
      }
#ifdef TH_REAL_IS_BYTE
      /* back to byte range */
      r *= 255;
      g *= 255;
      b *= 255;
#endif
      THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
      THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
      THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
    }
  }
  return 0;
}
#ifndef TH_REAL_IS_BYTE
/*
* Convert an sRGB color channel to a linear sRGB color channel.
*/
/*
 * sRGB electro-optical transfer: map a nonlinear sRGB channel value to
 * linear light (piecewise linear below 0.04045, power law above).
 */
static inline real image_(gamma_expand_sRGB)(real nonlinear)
{
  if (nonlinear <= 0.04045)
    return nonlinear / 12.92;
  return pow((nonlinear + 0.055) / 1.055, 2.4);
}
/*
* Convert a linear sRGB color channel to a sRGB color channel.
*/
/*
 * sRGB opto-electronic transfer: map a linear-light channel value back to
 * the nonlinear sRGB encoding (inverse of gamma_expand_sRGB).
 */
static inline real image_(gamma_compress_sRGB)(real linear)
{
  if (linear <= 0.0031308)
    return 12.92 * linear;
  return 1.055 * pow(linear, 1.0/2.4) - 0.055;
}
/*
* Converts an sRGB color value to LAB.
* Based on http://www.brucelindbloom.com/index.html?Equations.html.
* Assumes r, g, and b are contained in the set [0, 1].
* LAB output is NOT restricted to [0, 1]!
*/
/* image.rgb2lab: per-pixel sRGB -> CIE Lab conversion of a 3xHxW tensor.
 * Gamma-expands each channel, converts to XYZ, normalizes to the D65
 * white point, then applies the CIE f() transform.  Lab output is not
 * restricted to [0, 1]. */
int image_(Main_rgb2lab)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *lab = luaT_checkudata(L, 2, torch_Tensor);
  // CIE Standard
  double epsilon = 216.0/24389.0;  /* linearity threshold for f() */
  double k = 24389.0/27.0;         /* slope of the linear segment */
  // D65 white point
  double xn = 0.950456;
  double zn = 1.088754;
  int y,x;
  real r,g,b,l,a,_b;
  for (y=0; y<rgb->size[1]; y++) {
    for (x=0; x<rgb->size[2]; x++) {
      // get RGB, undoing the sRGB gamma to obtain linear light
      r = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 0, y, x));
      g = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 1, y, x));
      b = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 2, y, x));
      // sRGB to XYZ
      double X = 0.412453 * r + 0.357580 * g + 0.180423 * b;
      double Y = 0.212671 * r + 0.715160 * g + 0.072169 * b;
      double Z = 0.019334 * r + 0.119193 * g + 0.950227 * b;
      // normalize for D65 white point (Yn is 1, so Y is untouched)
      X /= xn;
      Z /= zn;
      // XYZ normalized to CIE Lab: cube root above epsilon, linear below
      double fx = X > epsilon ? pow(X, 1/3.0) : (k * X + 16)/116;
      double fy = Y > epsilon ? pow(Y, 1/3.0) : (k * Y + 16)/116;
      double fz = Z > epsilon ? pow(Z, 1/3.0) : (k * Z + 16)/116;
      l = 116 * fy - 16;
      a = 500 * (fx - fy);
      _b = 200 * (fy - fz);
      // set lab
      THTensor_(set3d)(lab, 0, y, x, l);
      THTensor_(set3d)(lab, 1, y, x, a);
      THTensor_(set3d)(lab, 2, y, x, _b);
    }
  }
  return 0;
}
/*
* Converts an LAB color value to sRGB.
* Based on http://www.brucelindbloom.com/index.html?Equations.html.
* returns r, g, and b in the set [0, 1].
*/
/*
 * image.lab2rgb: per-pixel CIE Lab -> sRGB conversion of a 3xHxW tensor.
 * Based on http://www.brucelindbloom.com/index.html?Equations.html.
 * Produces r, g, b in [0, 1] for in-gamut Lab input.
 */
int image_(Main_lab2rgb)(lua_State *L) {
  THTensor *lab = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
  int y,x;
  real r,g,b,l,a,_b;
  // CIE Standard
  double epsilon = 216.0/24389.0;  /* linearity threshold */
  double k = 24389.0/27.0;         /* slope of the linear segment */
  // D65 white point
  double xn = 0.950456;
  double zn = 1.088754;
  for (y=0; y<lab->size[1]; y++) {
    for (x=0; x<lab->size[2]; x++) {
      // get lab
      l = THTensor_(get3d)(lab, 0, y, x);
      a = THTensor_(get3d)(lab, 1, y, x);
      _b = THTensor_(get3d)(lab, 2, y, x);
      // LAB to XYZ
      double fy = (l + 16) / 116;
      double fz = fy - _b / 200;
      double fx = (a / 500) + fy;
      double X = pow(fx, 3);
      if (X <= epsilon)
        X = (116 * fx - 16) / k;   /* linear segment near black */
      double Y = l > (k * epsilon) ? pow((l + 16) / 116, 3) : l/k;
      double Z = pow(fz, 3);
      if (Z <= epsilon)
        Z = (116 * fz - 16) / k;
      // denormalize for the D65 white point
      X *= xn;
      Z *= zn;
      // XYZ to linear sRGB
      r = 3.2404542 * X - 1.5371385 * Y - 0.4985314 * Z;
      g = -0.9692660 * X + 1.8760108 * Y + 0.0415560 * Z;
      b = 0.0556434 * X - 0.2040259 * Y + 1.0572252 * Z;
      // Gamma-compress and store.  The template macro must be applied to
      // the function *name* -- image_(f)(arg) -- not wrapped around the
      // whole call; the previous image_(f(arg)) form only expanded
      // correctly by accident of token pasting.
      THTensor_(set3d)(rgb, 0, y, x, image_(gamma_compress_sRGB)(r));
      THTensor_(set3d)(rgb, 1, y, x, image_(gamma_compress_sRGB)(g));
      THTensor_(set3d)(rgb, 2, y, x, image_(gamma_compress_sRGB)(b));
    }
  }
  return 0;
}
#else
/* Byte-tensor stub: Lab conversion is only defined for floating-point
 * tensor types, so raise a Lua error. */
int image_(Main_rgb2lab)(lua_State *L) {
  return luaL_error(L, "image.rgb2lab: not supported for torch.ByteTensor");
}
/* Byte-tensor stub: Lab conversion is only defined for floating-point
 * tensor types, so raise a Lua error. */
int image_(Main_lab2rgb)(lua_State *L) {
  return luaL_error(L, "image.lab2rgb: not supported for torch.ByteTensor");
}
#endif // TH_REAL_IS_BYTE
/* Vertically flip an image */
/* image.vflip: mirror a 3xHxW image top-to-bottom.  Out-of-place copies
 * every row to its mirrored position; in-place swaps mirrored rows up to
 * the middle of the image. */
int image_(Main_vflip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  int width = dst->size[2];
  int height = dst->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  // raw data pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  long c, row, col;
  if (dst_data != src_data) {
    /* not in-place.
     * this branch could be removed by first duplicating the src into dst then doing inplace */
    #pragma omp parallel for private(c, row, col)
    for (c = 0; c < channels; c++) {
      for (row = 0; row < height; row++) {
        for (col = 0; col < width; col++) {
          dst_data[ c*os[0] + (height-1-row)*os[1] + col*os[2] ] =
            src_data[ c*is[0] + row*is[1] + col*is[2] ];
        }
      }
    }
  } else {
    /* in-place: exchange mirrored rows, stopping at the middle */
    real tmp, *top_px, *bot_px;
    long half_height = height >> 1;
    for (c = 0; c < channels; c++) {
      for (row = 0; row < half_height; row++) {
        for (col = 0; col < width; col++) {
          top_px = src_data + c*is[0] + row*is[1] + col*is[2];
          bot_px = dst_data + c*is[0] + (height-1-row)*is[1] + col*is[2];
          tmp = *bot_px;
          *bot_px = *top_px;
          *top_px = tmp;
        }
      }
    }
  }
  return 0;
}
/* Horizontally flip an image */
/* image.hflip: mirror a 3xHxW image left-to-right.  Out-of-place copies
 * every column to its mirrored position; in-place swaps mirrored columns
 * up to the middle of each row. */
int image_(Main_hflip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  int width = dst->size[2];
  int height = dst->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  // raw data pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  long c, row, col;
  if (dst_data != src_data) {
    /* not in-place.
     * this branch could be removed by first duplicating the src into dst then doing inplace */
    #pragma omp parallel for private(c, row, col)
    for (c = 0; c < channels; c++) {
      for (row = 0; row < height; row++) {
        for (col = 0; col < width; col++) {
          dst_data[ c*os[0] + row*os[1] + (width-col-1)*os[2] ] =
            src_data[ c*is[0] + row*is[1] + col*is[2] ];
        }
      }
    }
  } else {
    /* in-place: exchange mirrored columns, stopping at the middle */
    real tmp, *left_px, *right_px;
    long half_width = width >> 1;
    for (c = 0; c < channels; c++) {
      for (row = 0; row < height; row++) {
        for (col = 0; col < half_width; col++) {
          left_px = src_data + c*is[0] + row*is[1] + col*is[2];
          right_px = dst_data + c*is[0] + row*is[1] + (width-col-1)*is[2];
          tmp = *right_px;
          *right_px = *left_px;
          *left_px = tmp;
        }
      }
    }
  }
  return 0;
}
/* flip an image along a specified dimension */
/* image.flip: copy src into dst with one dimension reversed.  Lua args:
 * 1 dst tensor (5D), 2 src tensor (5D, same sizes), 3 flip_dim (1-based
 * dimension index to reverse).  In-place flipping is rejected. */
int image_(Main_flip)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  long flip_dim = luaL_checklong(L, 3);
  if ((dst->nDimension != 5) || (src->nDimension != 5)) {
    luaL_error(L, "image.flip: expected 5 dimensions for src and dst");
  }
  if (flip_dim < 1 || flip_dim > dst->nDimension || flip_dim > 5) {
    luaL_error(L, "image.flip: flip_dim out of bounds");
  }
  flip_dim--; // Make it zero indexed
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  if (dst_data == src_data) {
    luaL_error(L, "image.flip: in-place flip not supported");
  }
  long size0 = dst->size[0];
  long size1 = dst->size[1];
  long size2 = dst->size[2];
  long size3 = dst->size[3];
  long size4 = dst->size[4];
  if (src->size[0] != size0 || src->size[1] != size1 ||
      src->size[2] != size2 || src->size[3] != size3 ||
      src->size[4] != size4) {
    luaL_error(L, "image.flip: src and dst are not the same size");
  }
  long *is = src->stride;
  long *os = dst->stride;
  long x, y, z, d, t, isrc, idst = 0;
  /* walk every element of src, mirroring exactly one coordinate when
   * computing the destination offset */
  for (t = 0; t < size0; t++) {
    for (d = 0; d < size1; d++) {
      for (z = 0; z < size2; z++) {
        for (y = 0; y < size3; y++) {
          for (x = 0; x < size4; x++) {
            isrc = t*is[0] + d*is[1] + z*is[2] + y*is[3] + x*is[4];
            // The big switch statement here looks ugly, however on my machine
            // gcc compiles it to a skip list, so it should be fast.
            switch (flip_dim) {
              case 0:
                idst = (size0 - t - 1)*os[0] + d*os[1] + z*os[2] + y*os[3] + x*os[4];
                break;
              case 1:
                idst = t*os[0] + (size1 - d - 1)*os[1] + z*os[2] + y*os[3] + x*os[4];
                break;
              case 2:
                idst = t*os[0] + d*os[1] + (size2 - z - 1)*os[2] + y*os[3] + x*os[4];
                break;
              case 3:
                idst = t*os[0] + d*os[1] + z*os[2] + (size3 - y - 1)*os[3] + x*os[4];
                break;
              case 4:
                idst = t*os[0] + d*os[1] + z*os[2] + y*os[3] + (size4 - x - 1)*os[4];
                break;
            }
            dst_data[ idst ] = src_data[ isrc ];
          }
        }
      }
    }
  }
  return 0;
}
/* Sample a bicubic-interpolated value at fractional source position
 * (ix, iy) and write one result per channel into dst.  `is`/`os` are the
 * source/destination strides, `size` the source sizes (channels, height,
 * width).  When bounds_check is nonzero, taps falling outside the image
 * read pad_value instead of memory. */
static inline void image_(Main_bicubicInterpolate)(
  real* src, long* is, long* size, temp_t ix, temp_t iy,
  real* dst, long *os,
  real pad_value, int bounds_check)
{
  int i, j, k;
  temp_t arr[4], p[4];
  // Calculate fractional and integer components
  long x_pix = floor(ix);
  long y_pix = floor(iy);
  temp_t dx = ix - x_pix;
  temp_t dy = iy - y_pix;
  for (k=0; k<size[0]; k++) {
#pragma unroll
    for (i = 0; i < 4; i++) {
      long v = y_pix + i - 1;   /* tap row: one above through two below */
      real* data = &src[k * is[0] + v * is[1]];
#pragma unroll
      for (j = 0; j < 4; j++) {
        long u = x_pix + j - 1; /* tap column */
        if (bounds_check && (v < 0 || v >= size[1] || u < 0 || u >= size[2])) {
          p[j] = pad_value;
        } else {
          p[j] = data[u * is[2]];
        }
      }
      /* interpolate the 4 taps of this row horizontally... */
      arr[i] = image_(Main_cubicInterpolate)(p[0], p[1], p[2], p[3], dx);
    }
    /* ...then combine the 4 row results vertically */
    temp_t value = image_(Main_cubicInterpolate)(arr[0], arr[1], arr[2], arr[3], dy);
    dst[k * os[0]] = image_(FromIntermediate)(value);
  }
}
/*
* Warps an image, according to an (x,y) flow field. The flow
* field is in the space of the destination image, each vector
* ponts to a source pixel in the original image.
*/
/* image.warp: resample src into dst through a per-pixel (y, x) flow
 * field.  Lua args: 1 dst, 2 src, 3 flowfield (2xHxW), 4 mode
 * (0 nearest, 1 bilinear, 2 bicubic, 3 lanczos), 5 offset_mode
 * (true: flow is relative to the pixel position), 6 clamp_mode
 * (1: pad off-image samples with pad_value), 7 pad_value. */
int image_(Main_warp)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *flowfield = luaT_checkudata(L, 3, torch_Tensor);
  int mode = lua_tointeger(L, 4);
  int offset_mode = lua_toboolean(L, 5);
  int clamp_mode = lua_tointeger(L, 6);
  real pad_value = (real)lua_tonumber(L, 7);
  // dims
  int width = dst->size[2];
  int height = dst->size[1];
  int src_width = src->size[2];
  int src_height = src->size[1];
  int channels = dst->size[0];
  long *is = src->stride;
  long *os = dst->stride;
  long *fs = flowfield->stride;
  // get raw pointers
  real *dst_data = THTensor_(data)(dst);
  real *src_data = THTensor_(data)(src);
  real *flow_data = THTensor_(data)(flowfield);
  // resample
  long k,x,y,v,u,i,j;
#pragma omp parallel for private(k, x, y, v, u, i, j)
  for (y=0; y<height; y++) {
    for (x=0; x<width; x++) {
      // subpixel position:
      float flow_y = flow_data[ 0*fs[0] + y*fs[1] + x*fs[2] ];
      float flow_x = flow_data[ 1*fs[0] + y*fs[1] + x*fs[2] ];
      /* offset_mode multiplies by 0/1: absolute vs relative flow */
      float iy = offset_mode*y + flow_y;
      float ix = offset_mode*x + flow_x;
      // borders
      int off_image = 0;
      if (iy < 0 || iy > src_height - 1 ||
          ix < 0 || ix > src_width - 1) {
        off_image = 1;
      }
      if (off_image == 1 && clamp_mode == 1) {
        // We're off the image and we're clamping the input image to 0
        for (k=0; k<channels; k++) {
          dst_data[ k*os[0] + y*os[1] + x*os[2] ] = pad_value;
        }
      } else {
        /* clamp the sample position onto the source image */
        ix = MAX(ix,0); ix = MIN(ix,src_width-1);
        iy = MAX(iy,0); iy = MIN(iy,src_height-1);
        // bilinear?
        switch (mode) {
        case 1:  // Bilinear interpolation
          {
            // 4 nearest neighbors:
            long ix_nw = floor(ix);
            long iy_nw = floor(iy);
            long ix_ne = ix_nw + 1;
            long iy_ne = iy_nw;
            long ix_sw = ix_nw;
            long iy_sw = iy_nw + 1;
            long ix_se = ix_nw + 1;
            long iy_se = iy_nw + 1;
            // get surfaces to each neighbor:
            temp_t nw = (ix_se-ix)*(iy_se-iy);
            temp_t ne = (ix-ix_sw)*(iy_sw-iy);
            temp_t sw = (ix_ne-ix)*(iy-iy_ne);
            temp_t se = (ix-ix_nw)*(iy-iy_nw);
            // weighted sum of neighbors (MIN clamps the +1 neighbors
            // back inside the image at the right/bottom edges):
            for (k=0; k<channels; k++) {
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(
                  src_data[ k*is[0] + iy_nw*is[1] + ix_nw*is[2] ] * nw
                + src_data[ k*is[0] + iy_ne*is[1] + MIN(ix_ne,src_width-1)*is[2] ] * ne
                + src_data[ k*is[0] + MIN(iy_sw,src_height-1)*is[1] + ix_sw*is[2] ] * sw
                + src_data[ k*is[0] + MIN(iy_se,src_height-1)*is[1] + MIN(ix_se,src_width-1)*is[2] ] * se);
            }
          }
          break;
        case 0:  // Simple (i.e., nearest neighbor)
          {
            // 1 nearest neighbor:
            long ix_n = floor(ix+0.5);
            long iy_n = floor(iy+0.5);
            // weighted sum of neighbors:
            for (k=0; k<channels; k++) {
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = src_data[ k*is[0] + iy_n*is[1] + ix_n*is[2] ];
            }
          }
          break;
        case 2:  // Bicubic
          {
            // We only need to do bounds checking if ix or iy are near the edge
            int edge = !(iy >= 1 && iy < src_height - 2 && ix >= 1 && ix < src_width - 2);
            real* dst = dst_data + y*os[1] + x*os[2];
            if (edge) {
              image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 1);
            } else {
              image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 0);
            }
          }
          break;
        case 3:  // Lanczos
          {
            // Note: Lanczos can be made fast if the resampling period is
            // constant... and therefore the Lu, Lv can be cached and reused.
            // However, unfortunately warp makes no assumptions about resampling
            // and so we need to perform the O(k^2) convolution on each pixel AND
            // we have to re-calculate the kernel for every pixel.
            // See wikipedia for more info.
            // It is however an extremely good approximation to to full sinc
            // interpolation (IIR) filter.
            // Another note is that the version here has been optimized using
            // pretty aggressive code flow and explicit inlining. It might not
            // be very readable (contact me, Jonathan Tompson, if it is not)
            // Calculate fractional and integer components
            long x_pix = floor(ix);
            long y_pix = floor(iy);
            // Precalculate the L(x) function evaluations in the u and v direction
#define rad (3)  // This is a tunable parameter: 2 to 3 is OK
            float Lu[2 * rad];  // L(x) for u direction
            float Lv[2 * rad];  // L(x) for v direction
            for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
              float du = ix - (float)u;  // Lanczos kernel x value
              du = du < 0 ? -du : du;  // prefer not to used std absf
              if (du < 0.000001f) {  // TODO: Is there a real eps standard?
                Lu[i] = 1;
              } else if (du > (float)rad) {
                Lu[i] = 0;
              } else {
                Lu[i] = ((float)rad * sin((float)M_PI * du) *
                         sin((float)M_PI * du / (float)rad)) /
                        ((float)(M_PI * M_PI) * du * du);
              }
            }
            for (v=y_pix-rad+1, i=0; v<=y_pix+rad; v++, i++) {
              float dv = iy - (float)v;  // Lanczos kernel x value
              dv = dv < 0 ? -dv : dv;  // prefer not to used std absf
              if (dv < 0.000001f) {  // TODO: Is there a real eps standard?
                Lv[i] = 1;
              } else if (dv > (float)rad) {
                Lv[i] = 0;
              } else {
                Lv[i] = ((float)rad * sin((float)M_PI * dv) *
                         sin((float)M_PI * dv / (float)rad)) /
                        ((float)(M_PI * M_PI) * dv * dv);
              }
            }
            /* normalizing constant so the kernel weights sum to 1 */
            float sum_weights = 0;
            for (u=0; u<2*rad; u++) {
              for (v=0; v<2*rad; v++) {
                sum_weights += (Lu[u] * Lv[v]);
              }
            }
            for (k=0; k<channels; k++) {
              temp_t result = 0;
              for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
                long curu = MAX(MIN((long)(src_width-1), u), 0);
                for (v=y_pix-rad+1, j=0; v<=y_pix+rad; v++, j++) {
                  long curv = MAX(MIN((long)(src_height-1), v), 0);
                  temp_t Suv = src_data[k * is[0] + curv * is[1] + curu * is[2]];
                  temp_t weight = Lu[i] * Lv[j];
                  result += (Suv * weight);
                }
              }
              // Normalize by the sum of the weights
              result = result / (float)sum_weights;
              // Again, I assume that since the image is stored as reals we
              // don't have to worry about clamping to min and max int (to
              // prevent over or underflow)
              dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(result);
            }
          }
          break;
        }  // end switch (mode)
      }  // end else
    }
  }
  // done
  return 0;
}
/* image.gaussian: fill dst (HxW) with a 2D Gaussian.  Lua args: 1 dst
 * tensor, 2 amplitude, 3 normalize flag (rescale so values sum to 1),
 * 4/5 sigma_u, sigma_v (relative to width/height), 6/7 mean_u, mean_v
 * (relative coordinates of the center). */
int image_(Main_gaussian)(lua_State *L) {
  THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
  long width = dst->size[1];
  long height = dst->size[0];
  long *os = dst->stride;
  real *dst_data = THTensor_(data)(dst);
  temp_t amplitude = (temp_t)lua_tonumber(L, 2);
  int normalize = (int)lua_toboolean(L, 3);
  temp_t sigma_u = (temp_t)lua_tonumber(L, 4);
  temp_t sigma_v = (temp_t)lua_tonumber(L, 5);
  /* means come in as relative [0, 1] coordinates */
  temp_t mean_u = (temp_t)lua_tonumber(L, 6) * width + 0.5;
  temp_t mean_v = (temp_t)lua_tonumber(L, 7) * height + 0.5;
  // Precalculate 1/(sigma*size) for speed (for some stupid reason the pragma
  // omp declaration prevents gcc from optimizing the inside loop on my macine:
  // verified by checking the assembly output)
  temp_t over_sigmau = 1.0 / (sigma_u * width);
  temp_t over_sigmav = 1.0 / (sigma_v * height);
  long v, u;
  temp_t du, dv;
#pragma omp parallel for private(v, u, du, dv)
  for (v = 0; v < height; v++) {
    for (u = 0; u < width; u++) {
      /* normalized distance from the center in each direction */
      du = (u + 1 - mean_u) * over_sigmau;
      dv = (v + 1 - mean_v) * over_sigmav;
      temp_t value = amplitude * exp(-0.5 * (du*du + dv*dv));
      dst_data[ v*os[0] + u*os[1] ] = image_(FromIntermediate)(value);
    }
  }
  if (normalize) {
    temp_t sum = 0;
    // We could parallelize this, but it's more trouble than it's worth
    for(v = 0; v < height; v++) {
      for(u = 0; u < width; u++) {
        sum += dst_data[ v*os[0] + u*os[1] ];
      }
    }
    temp_t one_over_sum = 1.0 / sum;
#pragma omp parallel for private(v, u)
    for(v = 0; v < height; v++) {
      for(u = 0; u < width; u++) {
        dst_data[ v*os[0] + u*os[1] ] *= one_over_sum;
      }
    }
  }
  return 0;
}
/*
* Borrowed from github.com/clementfarabet/lua---imgraph
* with Clément's permission for implementing y2jet()
*/
/* image.colorize: expand an HxW id map into a colored image, one color
 * per id, looked up in (or generated into) `colormap`.  Lua args:
 * 1 output, 2 input id map, 3 colormap (empty tensor => auto-generate
 * random colors).  Pushes no Lua results. */
int image_(Main_colorize)(lua_State *L) {
  // get args
  THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
  THTensor *input = (THTensor *)luaT_checkudata(L, 2, torch_Tensor);
  THTensor *colormap = (THTensor *)luaT_checkudata(L, 3, torch_Tensor);
  // dims
  long height = input->size[0];
  long width = input->size[1];
  // generate color map if not given
  int noColorMap = THTensor_(nElement)(colormap) == 0;
  if (noColorMap) {
    THTensor_(resize2d)(colormap, width*height, 3);
    THTensor_(fill)(colormap, -1);  /* -1 marks "no color assigned yet" */
  }
  // colormap channels
  int channels = colormap->size[1];
  // generate output
  THTensor_(resize3d)(output, channels, height, width);
  int x,y,k;
  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x++) {
      int id = THTensor_(get2d)(input, y, x);
      // Draw a random color only the FIRST time an id is seen.  The
      // previous code re-randomized on every occurrence, so pixels
      // sharing an id could end up with different colors.
      if (noColorMap && THTensor_(get2d)(colormap, id, 0) == (real)-1) {
        for (k = 0; k < channels; k++) {
          temp_t value = (float)rand() / (float)RAND_MAX;
#ifdef TH_REAL_IS_BYTE
          value *= 255;
#endif
          THTensor_(set2d)(colormap, id, k, image_(FromIntermediate)(value));
        }
      }
      /* paint this pixel with the id's color */
      for (k = 0; k < channels; k++) {
        real color = THTensor_(get2d)(colormap, id, k);
        THTensor_(set3d)(output, k, y, x, color);
      }
    }
  }
  // return nothing
  return 0;
}
/* image.rgb2y: compute the Rec. 601 luma of a 3xHxW RGB tensor into an
 * HxW tensor.  Sizes are validated before the copy. */
int image_(Main_rgb2y)(lua_State *L) {
  THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
  THTensor *yim = luaT_checkudata(L, 2, torch_Tensor);
  luaL_argcheck(L, rgb->nDimension == 3, 1, "image.rgb2y: src not 3D");
  luaL_argcheck(L, yim->nDimension == 2, 2, "image.rgb2y: dst not 2D");
  luaL_argcheck(L, rgb->size[1] == yim->size[0], 2,
                "image.rgb2y: src and dst not of same height");
  luaL_argcheck(L, rgb->size[2] == yim->size[1], 2,
                "image.rgb2y: src and dst not of same width");
  const int height = rgb->size[1];
  const int width = rgb->size[2];
  int row, col;
  temp_t red, green, blue, luma;
  for (row = 0; row < height; row++) {
    for (col = 0; col < width; col++) {
      /* fetch one RGB pixel */
      red = THTensor_(get3d)(rgb, 0, row, col);
      green = THTensor_(get3d)(rgb, 1, row, col);
      blue = THTensor_(get3d)(rgb, 2, row, col);
      /* ITU-R BT.601 luma weights */
      luma = 0.299 * red + 0.587 * green + 0.114 * blue;
      THTensor_(set2d)(yim, row, col, image_(FromIntermediate)(luma));
    }
  }
  return 0;
}
/* Write one RGB pixel of `output` at (y, x).  Color components are given
 * in [0, 255]; non-byte tensors store them rescaled to [0, 1]. */
static inline void image_(drawPixel)(THTensor *output, int y, int x,
                                     int cr, int cg, int cb) {
#ifdef TH_REAL_IS_BYTE
  THTensor_(set3d)(output, 0, y, x, cr);
  THTensor_(set3d)(output, 1, y, x, cg);
  THTensor_(set3d)(output, 2, y, x, cb);
#else
  THTensor_(set3d)(output, 0, y, x, cr / 255.0f);
  THTensor_(set3d)(output, 1, y, x, cg / 255.0f);
  THTensor_(set3d)(output, 2, y, x, cb / 255.0f);
#endif
}
/* Draw one font glyph (6x8 cell from the 5-column image_ada_font bitmap,
 * scaled by `size`) with its top-left corner at (x, y).  Foreground is
 * (cr,cg,cb); background pixels are painted with (bg_cr,bg_cg,bg_cb)
 * unless any of them is -1 (transparent background).  Pixels are now
 * clipped individually, so partially visible glyphs no longer pass
 * out-of-range coordinates to drawPixel/set3d. */
static inline void image_(drawChar)(THTensor *output, int x, int y, unsigned char c, int size,
                                    int cr, int cg, int cb,
                                    int bg_cr, int bg_cg, int bg_cb) {
  long height = output->size[1];
  long width = output->size[2];
  /* glyph entirely off-screen: nothing to draw */
  if((x >= width) ||                // Clip right
     (y >= height) ||               // Clip bottom
     ((x + 6 * size - 1) < 0) ||    // Clip left
     ((y + 8 * size - 1) < 0))      // Clip top
    return;
  for(char i = 0; i < 6; i++ ) {
    unsigned char line;
    if (i < 5) {
      /* the bitmap stores 5 columns per glyph; column 6 is spacing */
      line = *(const unsigned char *)(image_ada_font+(c*5) + i);
    } else {
      line = 0x0;
    }
    for(char j = 0; j < 8; j++, line >>= 1) {
      int on = line & 0x1;
      int has_bg = (bg_cr != -1 && bg_cg != -1 && bg_cb != -1);
      if (!on && !has_bg)
        continue;  /* transparent background: skip unset bits */
      /* paint the size x size cell for this font bit, clipped per pixel */
      for (int ii = x + (i*size); ii < x + (i*size) + size; ii++) {
        if (ii < 0 || ii >= width)
          continue;
        for (int jj = y + (j*size); jj < y + (j*size) + size; jj++) {
          if (jj < 0 || jj >= height)
            continue;
          if (on)
            image_(drawPixel)(output, jj, ii, cr, cg, cb);
          else
            image_(drawPixel)(output, jj, ii, bg_cr, bg_cg, bg_cb);
        }
      }
    }
  }
}
/* image.text: render a string onto an image tensor using the built-in
 * 5x8 font.  Lua args:
 *   1: dst image tensor          2: text string
 *   3: x  4: y                   (top-left corner of first character)
 *   5: size (integer scale)      6-8: foreground r,g,b (0..255)
 *   9-11: background r,g,b, or -1,-1,-1 for a transparent background
 *   12: wrap (non-zero: wrap to the next line at the right edge)
 * Returns nothing. */
int image_(Main_drawtext)(lua_State *L) {
  // get args
  THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
  /* luaL_checkstring raises a proper Lua argument error on non-string
   * input, where lua_tostring would return NULL and crash the strlen()
   * below */
  const char* text = luaL_checkstring(L, 2);
  long x = luaL_checklong(L, 3);
  long y = luaL_checklong(L, 4);
  int size = luaL_checkint(L, 5);
  int cr = luaL_checkint(L, 6);
  int cg = luaL_checkint(L, 7);
  int cb = luaL_checkint(L, 8);
  int bg_cr = luaL_checkint(L, 9);
  int bg_cg = luaL_checkint(L, 10);
  int bg_cb = luaL_checkint(L, 11);
  int wrap = luaL_checkint(L, 12);
  long len = strlen(text);
  /* image width bounds the wrap position; vertical clipping is done
   * per character inside drawChar */
  long width = output->size[2];
  long cursor_y = y;
  long cursor_x = x;
  for (long cnt = 0; cnt < len; cnt++) {
    unsigned char c = text[cnt];
    if(c == '\n') {
      /* newline: advance one character row, reset to the left margin */
      cursor_y += size*8;
      cursor_x = x;
    } else if(c == '\r') {
      // skip em
    } else {
      if(wrap && ((cursor_x + size * 6) >= width)) { // Heading off edge?
        cursor_x = 0;          // Reset x to zero
        cursor_y += size * 8;  // Advance y one line
      }
      image_(drawChar)(output, cursor_x, cursor_y, c, size,
                       cr, cg, cb,
                       bg_cr, bg_cg, bg_cb);
      cursor_x += size * 6;
    }
  }
  return 0;
}
/* image.drawRect: draw an axis-aligned rectangle outline with corners
 * (x1, y1)-(x2, y2) and a line width of `lineWidth` pixels.
 * Lua args: 1: dst tensor, 2-5: x1, y1, x2, y2, 6: lineWidth,
 * 7-9: r, g, b color (0..255).  Returns nothing.
 * NOTE(review): with loffset = lineWidth/2 + 1 and
 * uoffset = lineWidth - loffset - 1, lineWidth == 1 paints the row/column
 * one pixel before the nominal coordinate -- confirm this bias is
 * intentional before changing it. */
int image_(Main_drawRect)(lua_State *L) {
  THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
  long x1long = luaL_checklong(L, 2);
  long y1long = luaL_checklong(L, 3);
  long x2long = luaL_checklong(L, 4);
  long y2long = luaL_checklong(L, 5);
  int lineWidth = luaL_checkint(L, 6);
  int cr = luaL_checkint(L, 7);
  int cg = luaL_checkint(L, 8);
  int cb = luaL_checkint(L, 9);
  /* split the line width into a lower and upper half around each edge */
  int loffset = lineWidth / 2 + 1;
  int uoffset = lineWidth - loffset - 1;
  /* clamp each thickened edge band to the image bounds
   * (size[1] = height, size[2] = width) */
  int x1l = (int) MAX(0, x1long - loffset);
  int y1l = (int) MAX(0, y1long - loffset);
  int x1u = (int) MIN(output->size[2], x1long + uoffset + 1);
  int y1u = (int) MIN(output->size[1], y1long + uoffset + 1);
  int x2l = (int) MAX(0, x2long - loffset);
  int y2l = (int) MAX(0, y2long - loffset);
  int x2u = (int) MIN(output->size[2], x2long + uoffset + 1);
  int y2u = (int) MIN(output->size[1], y2long + uoffset + 1);
  /* vertical edges: left (x1l..x1u) and right (x2l..x2u) bands over the
   * full clipped height */
  for (int y = y1l; y < y2u; y++) {
    for (int x = x1l; x < x1u; x++) {
      image_(drawPixel)(output, y, x, cr, cg, cb);
    }
    for (int x = x2l; x < x2u; x++) {
      image_(drawPixel)(output, y, x, cr, cg, cb);
    }
  }
  /* horizontal edges: top (y1l..y1u) and bottom (y2l..y2u) bands over the
   * full clipped width */
  for (int x = x1l; x < x2u; x++) {
    for (int y = y1l; y < y1u; y++) {
      image_(drawPixel)(output, y, x, cr, cg, cb);
    }
    for (int y = y2l; y < y2u; y++) {
      image_(drawPixel)(output, y, x, cr, cg, cb);
    }
  }
  return 0;
}
/* Dispatch table mapping Lua method names to the C implementations in
 * this file; registered on the torch Tensor metatable under the "image"
 * name by image_(Main_init). */
static const struct luaL_Reg image_(Main__) [] = {
  {"scaleSimple", image_(Main_scaleSimple)},
  {"scaleBilinear", image_(Main_scaleBilinear)},
  {"scaleBicubic", image_(Main_scaleBicubic)},
  {"rotate", image_(Main_rotate)},
  {"rotateBilinear", image_(Main_rotateBilinear)},
  {"polar", image_(Main_polar)},
  {"polarBilinear", image_(Main_polarBilinear)},
  {"logPolar", image_(Main_logPolar)},
  {"logPolarBilinear", image_(Main_logPolarBilinear)},
  {"translate", image_(Main_translate)},
  {"cropNoScale", image_(Main_cropNoScale)},
  {"warp", image_(Main_warp)},
  {"saturate", image_(Main_saturate)},
  {"rgb2y", image_(Main_rgb2y)},
  {"rgb2hsv", image_(Main_rgb2hsv)},
  {"rgb2hsl", image_(Main_rgb2hsl)},
  {"hsv2rgb", image_(Main_hsv2rgb)},
  {"hsl2rgb", image_(Main_hsl2rgb)},
  {"rgb2lab", image_(Main_rgb2lab)},
  {"lab2rgb", image_(Main_lab2rgb)},
  {"gaussian", image_(Main_gaussian)},
  {"vflip", image_(Main_vflip)},
  {"hflip", image_(Main_hflip)},
  {"flip", image_(Main_flip)},
  {"colorize", image_(Main_colorize)},
  {"text", image_(Main_drawtext)},
  {"drawRect", image_(Main_drawRect)},
  {NULL, NULL}  /* sentinel */
};
/* Register the functions in image_(Main__) under the "image" field of the
 * torch Tensor metatable for this element type. */
void image_(Main_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, image_(Main__), "image");
}
#endif // TH_GENERIC_FILE
|
GB_unop__identity_uint16_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_fc64)
// op(A') function: GB (_unop_tran__identity_uint16_fc64)
// C type: uint16_t
// A type: GxB_FC64_t
// cast: uint16_t cij = GB_cast_to_uint16_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = GB_cast_to_uint16_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = GB_cast_to_uint16_t (creal (Ax [p])) for all anz entries.
 * Cx and Ax may be aliased.  Ab is A->b when A is bitmap (NULL
 * otherwise); entries with Ab [p] == 0 are skipped.  Returns
 * GrB_NO_VALUE when the operator is disabled via GB_control.h. */
GrB_Info GB (_unop_apply__identity_uint16_fc64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* A is not bitmap: all anz entries are present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): the transpose loop lives in the shared template
 * GB_unop_transpose.c, driven by the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_uint16_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
nevpt_contract.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define BLK 48
#define BUFBASE 96
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
double FCI_t2ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
/* rdm2 (nnorb x norb*ncre) += tket (nnorb x bcount) . tbra^T.
 * Only the first ncre "rows" of the bra side are contracted, exploiting
 * particle permutation symmetry so the caller fills the rest by symmetry. */
static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket,
                                   int bcount, int ncre, int norb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        int nnorb = norb * norb;
        int nncre = norb * ncre;
        /* accumulate (beta = 1): rdm2 += tket . tbra^T */
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nncre, &bcount,
               &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb);
}
// (df|ce) E^d_f E^a_e|0> = t_ac
/* Contract the ERI tensor with the two-particle excitation amplitudes
 * t2ket for bcount determinants at once.  Each thread transposes the
 * trailing (a,e) index pair of its t2 slab into a private scratch buffer
 * t2t, then a single dgemm performs the contraction into gt2.
 * NOTE(review): malloc results are not checked here (matches the rest of
 * this file). */
void NEVPTkern_dfec_dfae(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, m, n;
        size_t k;
        double *cp0, *cp1;
        double *t2t; // E^d_fE^a_e with ae transposed
#pragma omp parallel private(cp0, cp1, t2t, m, n, i, k)
{
        t2t = malloc(sizeof(double) * n4);
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                /* transpose the last two orbital indices of this slab */
                for (i = 0; i < nnorb; i++) {
                        cp0 = t2ket + k * n4 + i * nnorb;
                        cp1 = t2t + i * nnorb;
                        for (m = 0; m < norb; m++) {
                        for (n = 0; n < norb; n++) {
                                cp1[n*norb+m] = cp0[m*norb+n];
                        } }
                }
                /* gt2[k] = eri . t2t^T (overwrite, beta = 0) */
                dgemm_(&TRANS_N, &TRANS_T, &norb, &norb, &n3,
                       &D1, eri, &norb, t2t, &norb,
                       &D0, gt2+nnorb*k, &norb);
        }
        free(t2t);
}
}
// (df|ea) E^e_c E^d_f|0> = t_ac
/* Same pattern as NEVPTkern_dfec_dfae, but here the LEADING two orbital
 * indices of each t2 slab are swapped into the thread-private buffer t2t
 * before the dgemm contraction with eri. */
void NEVPTkern_aedf_ecdf(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, m, n;
        size_t k;
        double *cp0, *cp1;
        double *t2t;
#pragma omp parallel private(cp0, cp1, t2t, m, n, i, k)
{
        t2t = malloc(sizeof(double) * n4);
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                /* swap the leading (m,n) block index: t2t[(n,m),:] = t2[(m,n),:] */
                for (m = 0; m < norb; m++) {
                for (n = 0; n < norb; n++) {
                        cp0 = t2ket + k * n4 + (m*norb+n) * nnorb;
                        cp1 = t2t + (n*norb+m) * nnorb;
                        for (i = 0; i < nnorb; i++) {
                                cp1[i] = cp0[i];
                        }
                } }
                /* gt2[k] = t2t^T . eri (overwrite, beta = 0) */
                dgemm_(&TRANS_T, &TRANS_N, &norb, &norb, &n3,
                       &D1, t2t, &n3, eri, &n3,
                       &D0, gt2+nnorb*k, &norb);
        }
        free(t2t);
}
}
// (df|ce) E^a_e E^d_f|0> = t_ac
/* No index transposition is needed for this operator ordering, so the
 * kernel is a bare batched dgemm; slabs are processed in groups of 8 to
 * amortize BLAS call overhead across threads. */
void NEVPTkern_cedf_aedf(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        size_t k;
        int blen;
#pragma omp parallel private(k, blen)
#pragma omp for schedule(dynamic, 1)
        for (k = 0; k < bcount; k+=8) {
                /* last group may hold fewer than 8 slabs */
                blen = MIN(bcount-k, 8) * norb;
                dgemm_(&TRANS_T, &TRANS_N, &norb, &blen, &n3,
                       &D1, eri, &n3, t2ket+n4*k, &n3,
                       &D0, gt2+nnorb*k, &norb);
        }
}
// (df|ea) E^d_f E^e_c|0> = t_ac
/* One dgemm per slab, no index transposition required for this operator
 * ordering. */
void NEVPTkern_dfea_dfec(double *gt2, double *eri, double *t2ket,
                         int bcount, int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        size_t k;
#pragma omp parallel private(k)
#pragma omp for schedule(dynamic, 4)
        for (k = 0; k < bcount; k++) {
                /* gt2[k] = t2ket[k] . eri^T (overwrite, beta = 0) */
                dgemm_(&TRANS_N, &TRANS_T, &norb, &norb, &n3,
                       &D1, t2ket+n4*k, &norb, eri, &norb,
                       &D0, gt2+nnorb*k, &norb);
        }
}
// TODO: NEVPTkern_spin0 stra_id >= strb_id as FCI4pdm_kern_spin0
/* Accumulate rdm2/rdm3 contributions for a batch of bcount beta strings
 * starting at (stra_id, strb_id).  Workflow:
 *   1. build t1 = E^i_j|ket> and t2 = E^i_j E^k_l|ket> for the batch,
 *   2. contract t2 with the ERIs via contract_kernel -> gt2,
 *   3. rdm3 += <bra-side of t2> . gt2 (threaded over ij, using particle
 *      symmetry k <= j),
 *   4. rdm2 += gt2 . t1^T (caller later reorders rdm2, see note below).
 * NOTE(review): malloc results are unchecked, matching this file's style.
 * The rdm3 updates are race-free because each ij owns a distinct slab. */
void NEVPTkern_sf(void (*contract_kernel)(),
                  double *rdm2, double *rdm3, double *eri, double *ci0,
                  int bcount, int stra_id, int strb_id,
                  int norb, int na, int nb, int nlinka, int nlinkb,
                  _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        const int n4 = nnorb * nnorb;
        const int n3 = nnorb * norb;
        int i, j, k, l, ij;
        size_t n;
        double *t1ket = malloc(sizeof(double) * nnorb * bcount);
        double *t2ket = malloc(sizeof(double) * n4 * bcount);
        double *gt2 = malloc(sizeof(double) * nnorb * bcount);
        double *tbra, *pbra, *pt2;
        // t2[:,i,j,k,l] = E^i_j E^k_l|ket>
        FCI_t1ci_sf(ci0, t1ket, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        FCI_t2ci_sf(ci0, t2ket, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        (*contract_kernel)(gt2, eri, t2ket, bcount, norb, na, nb);
#pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2)
{
        tbra = malloc(sizeof(double) * nnorb * bcount);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
                i = ij / norb;
                j = ij - i * norb;
                /* gather the bra-side amplitudes for this ij, only k <= j
                 * thanks to particle symmetry */
                for (n = 0; n < bcount; n++) {
                        for (k = 0; k <= j; k++) {
                                pbra = tbra + n * nnorb + k*norb;
                                pt2 = t2ket + n * n4 + k*nnorb + ij;
                                for (l = 0; l < norb; l++) {
                                        pbra[l] = pt2[l*n3];
                                }
                        }
                }
                tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, gt2,
                                       bcount, j+1, norb);
        }
        free(tbra);
}
        // reordering of rdm2 is needed: rdm2.transpose(1,0,2,3)
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, gt2, &nnorb, t1ket, &nnorb,
               &D1, rdm2, &nnorb);
        free(gt2);
        free(t1ket);
        free(t2ket);
}
/* Driver: loop over all alpha strings and blocks of beta strings,
 * accumulating rdm2/rdm3 via NEVPTkern_sf with the supplied contraction
 * kernel.  The intermediate pdm2 is accumulated with its first two
 * orbital indices swapped, so the final loop copies it into rdm2 with
 * the (i,j) -> (j,i) transpose applied.  rdm2 is fully overwritten;
 * rdm3 is zeroed here before accumulation. */
void NEVPTcontract(void (*kernel)(),
                   double *rdm2, double *rdm3, double *eri, double *ci0,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        const size_t nnorb = norb * norb;
        const size_t n4 = nnorb * nnorb;
        int i, j, k, ib, strk, bcount;
        double *pdm2 = malloc(sizeof(double) * n4);
        double *cp1, *cp0;
        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
        _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
        /* precompute compressed string-link tables used by the t1/t2 builders */
        FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
        FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);
        memset(pdm2, 0, sizeof(double) * n4);
        memset(rdm3, 0, sizeof(double) * n4 * nnorb);
        for (strk = 0; strk < na; strk++) {
                /* process beta strings in blocks of at most BUFBASE */
                for (ib = 0; ib < nb; ib += BUFBASE) {
                        bcount = MIN(BUFBASE, nb-ib);
                        NEVPTkern_sf(kernel, pdm2, rdm3,
                                     eri, ci0, bcount, strk, ib,
                                     norb, na, nb, nlinka, nlinkb, clinka, clinkb);
                }
        }
        free(clinka);
        free(clinkb);
        /* rdm2 = pdm2.transpose(1,0,2,3) */
        for (i = 0; i < norb; i++) {
        for (j = 0; j < norb; j++) {
                cp1 = rdm2 + (i*norb+j) * nnorb;
                cp0 = pdm2 + (j*norb+i) * nnorb;
                for (k = 0; k < nnorb; k++) {
                        cp1[k] = cp0[k];
                }
        } }
        free(pdm2);
}
|
smg_residual.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Private state for the SMG residual computation r = b - A*x. */
typedef struct
{
   hypre_Index          base_index;    /* origin of the base grid points */
   hypre_Index          base_stride;   /* stride between base grid points */

   hypre_StructMatrix  *A;             /* operator */
   hypre_StructVector  *x;             /* current approximation */
   hypre_StructVector  *b;             /* right-hand side */
   hypre_StructVector  *r;             /* residual output */

   hypre_BoxArray      *base_points;   /* grid boxes projected onto the base */
   hypre_ComputePkg    *compute_pkg;   /* comm/compute pattern for x ghosts */

   HYPRE_Int            time_index;    /* handle from hypre_InitializeTiming */
   HYPRE_Int            flops;         /* flop estimate, set in Setup */

} hypre_SMGResidualData;
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Allocate an SMG residual object with default base index (0,0,0) and
 * base stride (1,1,1).  Returned pointer is passed to the other
 * hypre_SMGResidual* routines and freed by hypre_SMGResidualDestroy. */
void *
hypre_SMGResidualCreate( )
{
   hypre_SMGResidualData *residual_data;

   residual_data = hypre_CTAlloc(hypre_SMGResidualData, 1);

   (residual_data -> time_index) = hypre_InitializeTiming("SMGResidual");

   /* set defaults */
   hypre_SetIndex3((residual_data -> base_index), 0, 0, 0);
   hypre_SetIndex3((residual_data -> base_stride), 1, 1, 1);

   return (void *) residual_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set up the residual object: build the base-point box array and the
 * communication/computation package for x, take references to A, x, b
 * and r, and record a flop estimate.  Must be called before
 * hypre_SMGResidual. */
HYPRE_Int
hypre_SMGResidualSetup( void               *residual_vdata,
                        hypre_StructMatrix *A,
                        hypre_StructVector *x,
                        hypre_StructVector *b,
                        hypre_StructVector *r )
{
   hypre_SMGResidualData *residual_data = (hypre_SMGResidualData *)residual_vdata;

   hypre_IndexRef        base_index  = (residual_data -> base_index);
   hypre_IndexRef        base_stride = (residual_data -> base_stride);

   hypre_StructGrid     *grid;
   hypre_StructStencil  *stencil;

   hypre_BoxArray       *base_points;
   hypre_ComputeInfo    *compute_info;
   hypre_ComputePkg     *compute_pkg;

   /*----------------------------------------------------------
    * Set up base points and the compute package
    *----------------------------------------------------------*/

   grid    = hypre_StructMatrixGrid(A);
   stencil = hypre_StructMatrixStencil(A);

   /* restrict the grid boxes to the base index space */
   base_points = hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid));
   hypre_ProjectBoxArray(base_points, base_index, base_stride);

   hypre_CreateComputeInfo(grid, stencil, &compute_info);
   hypre_ComputeInfoProjectComp(compute_info, base_index, base_stride);
   hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1,
                          grid, &compute_pkg);

   /*----------------------------------------------------------
    * Set up the residual data structure
    *----------------------------------------------------------*/

   (residual_data -> A)           = hypre_StructMatrixRef(A);
   (residual_data -> x)           = hypre_StructVectorRef(x);
   (residual_data -> b)           = hypre_StructVectorRef(b);
   (residual_data -> r)           = hypre_StructVectorRef(r);
   (residual_data -> base_points) = base_points;
   (residual_data -> compute_pkg) = compute_pkg;

   /*-----------------------------------------------------
    * Compute flops
    *-----------------------------------------------------*/

   (residual_data -> flops) =
      (hypre_StructMatrixGlobalSize(A) + hypre_StructVectorGlobalSize(x)) /
      (hypre_IndexX(base_stride) *
       hypre_IndexY(base_stride) *
       hypre_IndexZ(base_stride) );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Compute the residual r = b - A*x on the base points.
 * Two passes driven by the compute package: pass 0 starts the ghost-zone
 * exchange for x and processes the boxes independent of communication
 * (also copying b into r); pass 1 waits for communication and processes
 * the dependent boxes.  In both passes r -= A*x is accumulated one
 * stencil entry at a time. */
HYPRE_Int
hypre_SMGResidual( void               *residual_vdata,
                   hypre_StructMatrix *A,
                   hypre_StructVector *x,
                   hypre_StructVector *b,
                   hypre_StructVector *r )
{
   hypre_SMGResidualData  *residual_data = (hypre_SMGResidualData *)residual_vdata;

   hypre_IndexRef          base_stride = (residual_data -> base_stride);
   hypre_BoxArray         *base_points = (residual_data -> base_points);
   hypre_ComputePkg       *compute_pkg = (residual_data -> compute_pkg);

   hypre_CommHandle       *comm_handle;

   hypre_BoxArrayArray    *compute_box_aa;
   hypre_BoxArray         *compute_box_a;
   hypre_Box              *compute_box;

   hypre_Box              *A_data_box;
   hypre_Box              *x_data_box;
   hypre_Box              *b_data_box;
   hypre_Box              *r_data_box;

   HYPRE_Int               Ai;
   HYPRE_Int               xi;
   HYPRE_Int               bi;
   HYPRE_Int               ri;

   HYPRE_Real             *Ap;
   HYPRE_Real             *xp;
   HYPRE_Real             *bp;
   HYPRE_Real             *rp;

   hypre_Index             loop_size;
   hypre_IndexRef          start;

   hypre_StructStencil    *stencil;
   hypre_Index            *stencil_shape;
   HYPRE_Int               stencil_size;

   HYPRE_Int               compute_i, i, j, si;

   hypre_BeginTiming(residual_data -> time_index);

   /*-----------------------------------------------------------------------
    * Compute residual r = b - Ax
    *-----------------------------------------------------------------------*/

   stencil       = hypre_StructMatrixStencil(A);
   stencil_shape = hypre_StructStencilShape(stencil);
   stencil_size  = hypre_StructStencilSize(stencil);

   for (compute_i = 0; compute_i < 2; compute_i++)
   {
      switch(compute_i)
      {
         case 0:
         {
            /* start ghost exchange for x, work on independent boxes */
            xp = hypre_StructVectorData(x);
            hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
            compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);

            /*----------------------------------------
             * Copy b into r
             *----------------------------------------*/

            compute_box_a = base_points;
            hypre_ForBoxI(i, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, i);
               start = hypre_BoxIMin(compute_box);

               b_data_box =
                  hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
               r_data_box =
                  hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);

               bp = hypre_StructVectorBoxData(b, i);
               rp = hypre_StructVectorBoxData(r, i);

               hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);

               hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
                                   b_data_box, start, base_stride, bi,
                                   r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ri) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop2For(bi, ri)
               {
                  rp[ri] = bp[bi];
               }
               hypre_BoxLoop2End(bi, ri);
            }
         }
         break;

         case 1:
         {
            /* ghost values of x are now needed: finish communication */
            hypre_FinalizeIndtComputations(comm_handle);
            compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
         }
         break;
      }

      /*--------------------------------------------------------------------
       * Compute r -= A*x
       *--------------------------------------------------------------------*/

      hypre_ForBoxArrayI(i, compute_box_aa)
      {
         compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

         A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
         x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
         r_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), i);

         rp = hypre_StructVectorBoxData(r, i);

         hypre_ForBoxI(j, compute_box_a)
         {
            compute_box = hypre_BoxArrayBox(compute_box_a, j);
            start = hypre_BoxIMin(compute_box);

            /* one sweep per stencil entry; xp is offset by the entry shape */
            for (si = 0; si < stencil_size; si++)
            {
               Ap = hypre_StructMatrixBoxData(A, i, si);
               xp = hypre_StructVectorBoxData(x, i) +
                  hypre_BoxOffsetDistance(x_data_box, stencil_shape[si]);

               hypre_BoxGetStrideSize(compute_box, base_stride,
                                      loop_size);

               hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
                                   A_data_box, start, base_stride, Ai,
                                   x_data_box, start, base_stride, xi,
                                   r_data_box, start, base_stride, ri);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ri) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop3For(Ai, xi, ri)
               {
                  rp[ri] -= Ap[Ai] * xp[xi];
               }
               hypre_BoxLoop3End(Ai, xi, ri);
            }
         }
      }
   }

   hypre_IncFLOPCount(residual_data -> flops);
   hypre_EndTiming(residual_data -> time_index);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Override the default base index/stride used when projecting the grid
 * onto the base points (copied component-wise, 3 dimensions). */
HYPRE_Int
hypre_SMGResidualSetBase( void        *residual_vdata,
                          hypre_Index  base_index,
                          hypre_Index  base_stride )
{
   hypre_SMGResidualData *data = (hypre_SMGResidualData *) residual_vdata;
   HYPRE_Int              dim;

   for (dim = 0; dim < 3; dim++)
   {
      hypre_IndexD((data -> base_index), dim)  = hypre_IndexD(base_index, dim);
      hypre_IndexD((data -> base_stride), dim) = hypre_IndexD(base_stride, dim);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Release all references held by the residual object and free the object
 * itself.  A NULL residual_vdata is a no-op. */
HYPRE_Int
hypre_SMGResidualDestroy( void *residual_vdata )
{
   hypre_SMGResidualData *residual_data = (hypre_SMGResidualData *)residual_vdata;

   if (residual_data)
   {
      hypre_StructMatrixDestroy(residual_data -> A);
      hypre_StructVectorDestroy(residual_data -> x);
      hypre_StructVectorDestroy(residual_data -> b);
      hypre_StructVectorDestroy(residual_data -> r);
      hypre_BoxArrayDestroy(residual_data -> base_points);
      hypre_ComputePkgDestroy(residual_data -> compute_pkg );
      hypre_FinalizeTiming(residual_data -> time_index);
      hypre_TFree(residual_data);
   }

   return hypre_error_flag;
}
|
GB_unop__identity_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_uint8)
// op(A') function: GB (_unop_tran__identity_uint32_uint8)
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (uint32_t) Ax [p] for all anz entries.  Cx and Ax may be
 * aliased.  Ab is A->b when A is bitmap (NULL otherwise); entries with
 * Ab [p] == 0 are skipped.  Returns GrB_NO_VALUE when the operator is
 * disabled via GB_control.h. */
GrB_Info GB (_unop_apply__identity_uint32_uint8)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* A is not bitmap: all anz entries are present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): the transpose loop lives in the shared template
 * GB_unop_transpose.c, driven by the GB_* macros defined above. */
GrB_Info GB (_unop_tran__identity_uint32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test.c | int main(){
int i = 0;
#pragma omp atomic
i++;
}
|
GB_unop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_bool_bool)
// op(A') function: GB (_unop_tran__lnot_bool_bool)
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = !z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !Ax [p] for all anz entries.  Cx and Ax may be aliased.
 * Ab is A->b when A is bitmap (NULL otherwise); entries with
 * Ab [p] == 0 are skipped.  Returns GrB_NO_VALUE when the operator is
 * disabled via GB_control.h. */
GrB_Info GB (_unop_apply__lnot_bool_bool)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* A is not bitmap: all anz entries are present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = !z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = !z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): the transpose loop lives in the shared template
 * GB_unop_transpose.c, driven by the GB_* macros defined above. */
GrB_Info GB (_unop_tran__lnot_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rwalk.c | #include "rwalk.h"
#include <omp.h>
#include <stdlib.h>
/* Generate `num_walks` uniform random walks of `num_steps` steps from every
 * node of a CSR graph (ptr/neighs), in parallel over nodes.
 * walks is laid out as [n][num_walks][num_steps + 1]; each row starts with
 * the source node.  A walk stays put on a node with no out-neighbors.
 * Each thread draws from its own rand_r state seeded with
 * seed + thread id; nthread > 0 overrides the OpenMP thread count. */
void random_walk(int const* ptr, int const* neighs, int n, int num_walks,
                 int num_steps, int seed, int nthread, int* walks) {
  if (nthread > 0) {
    omp_set_num_threads(nthread);
  }
#pragma omp parallel
  {
    /* per-thread PRNG state: deterministic for a fixed seed/thread count */
    unsigned int rng_state = (unsigned int)(seed + omp_get_thread_num());
#pragma omp for
    for (int node = 0; node < n; node++) {
      for (int walk = 0; walk < num_walks; walk++) {
        int out = node * num_walks * (num_steps + 1) + walk * (num_steps + 1);
        int curr = node;
        walks[out] = node;
        for (int step = 0; step < num_steps; step++) {
          int degree = ptr[curr + 1] - ptr[curr];
          if (degree > 0) {
            curr = neighs[ptr[curr] + (rand_r(&rng_state) % degree)];
          }
          walks[out + step + 1] = curr;
        }
      }
    }
  }
}
|
GB_binop__gt_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint32)
// A*D function (colscale): GB (_AxD__gt_uint32)
// D*A function (rowscale): GB (_DxB__gt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint32)
// C=scalar+B GB (_bind1st__gt_uint32)
// C=scalar+B' GB (_bind1st_tran__gt_uint32)
// C=A+scalar GB (_bind2nd__gt_uint32)
// C=A'+scalar GB (_bind2nd_tran__gt_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (trailing line-continuation removed: it spliced the next comment line
// into the macro body, which is fragile if a code line ever follows)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT32 || GxB_NO_GT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all full (dense); no mask, no accumulator
void GB (_Cdense_ewise3_noaccum__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // loop body comes from the shared template; GB_BINOP above supplies
    // cij = (aij > bij)
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix (subassign 23).
// The kernel itself is compiled out (#if 0) for this operator — presumably
// because C (bool) and B (uint32_t) differ in type (GB_CTYPE_IS_BTYPE is 0);
// confirm against the Generator/ scripts.  The stub returns GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (subassign 22).
// Compiled out (#if 0) for this operator — presumably because C (bool) and
// the scalar b (uint32_t) differ in type; confirm against Generator/.
GrB_Info GB (_Cdense_accumb__gt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D
void_unused_comment_removed: ;
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D
GrB_Info GB (_DxB__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C takes the pattern of B; Cx is bool because GT returns bool
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M>=A+B, or C<!M>=A+B.
// For eWiseUnion the alpha/beta scalars substitute for entries present in
// only one of A or B; for plain eWiseAdd they are left uninitialized and
// (per the is_eWiseUnion guard below) are not read.
GrB_Info GB (_AaddB__gt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace handles used by the template and released below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // scalars are already the A/B type (uint32_t); no typecast needed
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper
GrB_Info GB (_AemultB_08__gt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entire body supplied by the method-08 meta template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for this operator (see above), so only the unflipped
// branch below is compiled; flipping of GT is handled elsewhere.
GrB_Info GB (_AemultB_02__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full
GrB_Info GB (_AemultB_04__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entire body supplied by the method-04 template
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as bitmap
GrB_Info GB (_AemultB_bitmap__gt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entire body supplied by the bitmap emult template
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x > Bx [k]) for every entry present in B, with x bound as the
// first operand of the GT operator.
GrB_Info GB (_bind1st__gt_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only touch positions present in the bitmap Bb
        if (GBB (Bb, k))
        {
            uint32_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] > y) for every entry present in A, with y bound as the
// second operand of the GT operator.
GrB_Info GB (_bind2nd__gt_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch positions present in the bitmap Ab
        if (GBB (Ab, k))
        {
            uint32_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x > aij) via GB_CAST_OP above
GrB_Info GB (_bind1st_tran__gt_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function (generated
    // boilerplate; the value is the same for this operator)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij > y) via GB_CAST_OP above
GrB_Info GB (_bind2nd_tran__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
8499.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
// Polybench heat-3d kernel as tiled by CHILL: each time step applies a
// 7-point stencil A->B, then the same stencil B->A.  t4 (tile of 8) and
// t8 (tile of 64) are tile loops; t6, t10, t12 walk the tiles.
//
// Fixes to the generated OpenMP clauses:
//  * private() named a nonexistent "t14" — a hard compile error with
//    -fopenmp enabled;
//  * t6 was missing from private(), so all threads shared one t6 and
//    raced on it, producing wrong results.  (t4 is the parallel loop index
//    and is implicitly private; it is kept in the list for clarity.)
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  // NOTE(review): the generator hard-coded 500 time steps; the tsteps
  // parameter is ignored, exactly as in the original — confirm callers.
  for (t2 = 1; t2 <= 500; t2 += 1) {
#pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
#pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
  }
}
|
convolution_sgemm_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Packed im2col + GEMM for RISC-V Vector: bottom_im2col holds (size, maxk,
// inch) with packn floats interleaved per column.  The data is first
// permuted into `tmp` so the GEMM inner loop reads contiguous batches of
// 8/4/2/1 columns, then each output channel is accumulated as dot products
// against the pre-packed kernel, with the optional bias as the initial sum.
static void im2col_sgemm_packn_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;    // 32-bit lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);
    // Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;
    const float* bias = _bias;
    // permute
    // tmp channel layout: one channel per group of 8 columns, then per group
    // of 4, then 2, then each single leftover column
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
    else if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * packn, packn, opt.workspace_allocator);
    {
        // ---- pack groups of 8 columns ----
        int remain_size_start = 0;
        int nn_size = size >> 3;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;
            float* tmpptr = tmp.channel(i / 8);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
                for (int k = 0; k < maxk; k++)
                {
#if RVV_SPEC_0_7
                    // spec 0.7.1 path: no segment store, interleave by scalar copies
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr[2] = img0[l + packn * 2];
                        tmpptr[3] = img0[l + packn * 3];
                        tmpptr[4] = img0[l + packn * 4];
                        tmpptr[5] = img0[l + packn * 5];
                        tmpptr[6] = img0[l + packn * 6];
                        tmpptr[7] = img0[l + packn * 7];
                        tmpptr += 8;
                    }
                    img0 += size * packn;
#else
                    // segment store interleaves the 8 column vectors in one op
                    vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
                    vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
                    vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
                    vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
                    vfloat32m1_t _val4 = vle32_v_f32m1(img0 + packn * 4, vl);
                    vfloat32m1_t _val5 = vle32_v_f32m1(img0 + packn * 5, vl);
                    vfloat32m1_t _val6 = vle32_v_f32m1(img0 + packn * 6, vl);
                    vfloat32m1_t _val7 = vle32_v_f32m1(img0 + packn * 7, vl);
                    vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
                    img0 += size * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
        }
        // ---- pack groups of 4 columns ----
        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
                for (int k = 0; k < maxk; k++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr[2] = img0[l + packn * 2];
                        tmpptr[3] = img0[l + packn * 3];
                        tmpptr += 4;
                    }
                    img0 += size * packn;
#else
                    vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
                    vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
                    vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
                    vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
                    vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
                    img0 += size * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
        }
        // ---- pack pairs of columns ----
        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
                for (int k = 0; k < maxk; k++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = img0[l];
                        tmpptr[1] = img0[l + packn];
                        tmpptr += 2;
                    }
                    img0 += size * packn;
#else
                    vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
                    vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
                    vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
                    img0 += size * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
        }
        // ---- copy single leftover columns ----
        remain_size_start += nn_size << 1;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
                for (int k = 0; k < maxk; k++)
                {
                    vfloat32m1_t _val = vle32_v_f32m1(img0, vl);
                    vse32_v_f32m1(tmpptr, _val, vl);
                    img0 += size * packn;
                    tmpptr += packn;
                }
            }
        }
    }
    // ---- GEMM: one output channel per task, columns in batches of 8/4/2/1 ----
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        int i = 0;
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 8);
            const float* kptr0 = kernel.channel(p);
            int nn = inch * maxk * packn; // inch always > 0
            // per-column accumulators, seeded with the bias when present
            vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
            if (bias)
            {
                _sum0 = vle32_v_f32m1(bias + p * packn, vl);
                _sum1 = vle32_v_f32m1(bias + p * packn, vl);
                _sum2 = vle32_v_f32m1(bias + p * packn, vl);
                _sum3 = vle32_v_f32m1(bias + p * packn, vl);
                _sum4 = vle32_v_f32m1(bias + p * packn, vl);
                _sum5 = vle32_v_f32m1(bias + p * packn, vl);
                _sum6 = vle32_v_f32m1(bias + p * packn, vl);
                _sum7 = vle32_v_f32m1(bias + p * packn, vl);
            }
            for (int j = 0; j < nn; j++)
            {
                // one scalar per column, broadcast-multiplied with the kernel vector
                float val0 = *tmpptr++;
                float val1 = *tmpptr++;
                float val2 = *tmpptr++;
                float val3 = *tmpptr++;
                float val4 = *tmpptr++;
                float val5 = *tmpptr++;
                float val6 = *tmpptr++;
                float val7 = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
                _sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
                _sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
                _sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
                _sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
                _sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
                _sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
                kptr0 += packn;
            }
            vse32_v_f32m1(outptr0, _sum0, vl);
            vse32_v_f32m1(outptr0 + packn, _sum1, vl);
            vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
            vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
            vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl);
            vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl);
            vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl);
            vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl);
            outptr0 += packn * 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const float* kptr0 = kernel.channel(p);
            int nn = inch * maxk * packn; // inch always > 0
            vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
            if (bias)
            {
                _sum0 = vle32_v_f32m1(bias + p * packn, vl);
                _sum1 = vle32_v_f32m1(bias + p * packn, vl);
                _sum2 = vle32_v_f32m1(bias + p * packn, vl);
                _sum3 = vle32_v_f32m1(bias + p * packn, vl);
            }
            for (int j = 0; j < nn; j++)
            {
                float val0 = *tmpptr++;
                float val1 = *tmpptr++;
                float val2 = *tmpptr++;
                float val3 = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
                _sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
                _sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
                kptr0 += packn;
            }
            vse32_v_f32m1(outptr0, _sum0, vl);
            vse32_v_f32m1(outptr0 + packn, _sum1, vl);
            vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
            vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
            outptr0 += packn * 4;
        }
        for (; i + 1 < size; i += 2)
        {
            const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
            const float* kptr0 = kernel.channel(p);
            int nn = inch * maxk * packn; // inch always > 0
            vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
            vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
            if (bias)
            {
                _sum0 = vle32_v_f32m1(bias + p * packn, vl);
                _sum1 = vle32_v_f32m1(bias + p * packn, vl);
            }
            for (int j = 0; j < nn; j++)
            {
                float val0 = *tmpptr++;
                float val1 = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
                _sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
                kptr0 += packn;
            }
            vse32_v_f32m1(outptr0, _sum0, vl);
            vse32_v_f32m1(outptr0 + packn, _sum1, vl);
            outptr0 += packn * 2;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
            const float* kptr0 = kernel.channel(p);
            int nn = inch * maxk * packn; // inch always > 0
            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
            if (bias)
            {
                _sum = vle32_v_f32m1(bias + p * packn, vl);
            }
            for (int j = 0; j < nn; j++)
            {
                float val = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
                kptr0 += packn;
            }
            vse32_v_f32m1(outptr0, _sum, vl);
            outptr0 += packn;
        }
    }
}
// Convolution via im2col + packed GEMM: expand the packn-interleaved input
// into column form, then delegate to im2col_sgemm_packn_rvv for the GEMM.
static void convolution_im2col_sgemm_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;    // 32-bit lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;
    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
    {
        // gap: elements to skip from the end of one output row's reads to
        // the start of the next (stride_h input rows down)
        const int gap = (w * stride_h - outw * stride_w) * packn;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            // one im2col "row" per kernel tap (u,v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * packn;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vse32_v_f32m1(ptr, _val, vl);
                            sptr += stride_w * packn;
                            ptr += packn;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_packn_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
sparse_to_mat.c | #include "main.h"
// Expand a COO sparse matrix into a dense row-major mat_rv (element (i,j)
// at i*cols + j), serially.  Stops at the first out-of-range element
// (ERR_DIM_MISSMATCH) or the first duplicate coordinate (ERR_DUPLICATE),
// leaving rv.error set.  The construction time is recorded in t_construct.
mat_rv coo_to_mat_nothreading(coo matrix)
{
    mat_rv rv;
    struct timespec t_begin, t_end;
    get_utc_time(&t_begin);

    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;

    // calloc zero-fills, so cells without a COO entry read as 0 / 0.0
    if (matrix.type == MAT_INT) {
        rv.vals.i = (int*)calloc(rv.rows * rv.cols, sizeof(int));
        if (!rv.vals.i) {
            fprintf(stderr, "Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else {
        rv.vals.f = (long double*)calloc(rv.rows * rv.cols, sizeof(long double));
        if (!rv.vals.f) {
            fprintf(stderr, "Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }

    for (int k = 0; k < matrix.length; ++k) {
        // reject coordinates outside the declared matrix bounds
        if (!(matrix.elems[k].i < rv.rows && matrix.elems[k].j < rv.cols)) {
            rv.error = ERR_DIM_MISSMATCH;
            break;
        }
        if (rv.type == MAT_INT) {
            // a nonzero cell was already written: duplicate coordinate
            if (rv.vals.i[matrix.elems[k].i * rv.cols + matrix.elems[k].j] != 0) {
                rv.error = ERR_DUPLICATE;
                break;
            }
            rv.vals.i[matrix.elems[k].i * rv.cols + matrix.elems[k].j] = matrix.elems[k].val.i;
        }
        else {
            if (rv.vals.f[matrix.elems[k].i * rv.cols + matrix.elems[k].j] != 0.0) {
                rv.error = ERR_DUPLICATE;
                break;
            }
            rv.vals.f[matrix.elems[k].i * rv.cols + matrix.elems[k].j] = matrix.elems[k].val.f;
        }
    }

    get_utc_time(&t_end);
    rv.t_construct = time_delta(t_end, t_begin);
    return rv;
}
// Parallel COO -> dense conversion: same row-major layout as
// coo_to_mat_nothreading (element (i,j) at i*cols + j), parallelised over
// the element list with OpenMP.  Unlike the serial version it cannot stop
// at the first bad element; rv.error records that some error occurred.
//
// Fix: an out-of-range element previously only set rv.error and then went
// on to WRITE rv.vals at the out-of-bounds index — a heap overflow.  Such
// elements are now skipped with `continue`.
mat_rv coo_to_mat(coo matrix, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    // calloc zero-fills, so cells without a COO entry read as 0 / 0.0
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(matrix, rv)
    {
        // per-thread copy of the type tag avoids re-reading shared state
        MAT_TYPE local_type = matrix.type;
        #pragma omp for private(i)
        for(i = 0; i < matrix.length; ++i){
            if(!(matrix.elems[i].i < rv.rows && matrix.elems[i].j < rv.cols)){
                // out-of-range coordinate: record the error and skip the
                // element instead of writing out of bounds
                rv.error = ERR_DIM_MISSMATCH;
                continue;
            }
            // NOTE(review): concurrent writes to rv.error are a data race;
            // all writers store error codes so the outcome is "some error",
            // but which one wins is unspecified — confirm acceptable.
            if(local_type == MAT_INT){
                if(rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0)
                    rv.error = ERR_DUPLICATE;
                rv.vals.i[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.i;
            }
            else{
                if (rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] != 0.0)
                    rv.error = ERR_DUPLICATE;
                rv.vals.f[matrix.elems[i].i * rv.cols + matrix.elems[i].j] = matrix.elems[i].val.f;
            }
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
// Expand a CSR (compressed sparse row) matrix into a dense row-major
// mat_rv, serially.  ia[i]..ia[i+1]-1 index this row's entries in ja/nnz;
// any entries left unconsumed after the column sweep imply ja held a
// column index >= cols, reported as ERR_DIM_MISSMATCH.
mat_rv csr_to_mat_nothreading(csr matrix)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    for(int i = 0; i < rv.rows; ++i){
        //iterator for nnz/ja
        int a_i = matrix.ia[i];
        for(int j = 0; j < rv.cols; ++j){
            if(a_i < matrix.ia[i + 1]){
                // next stored entry of this row matches column j
                if(matrix.ja[a_i] == j){
                    if(rv.type == MAT_INT)
                        rv.vals.i[i*rv.cols + j] = matrix.nnz.i[a_i];
                    else
                        rv.vals.f[i*rv.cols + j] = matrix.nnz.f[a_i];
                    a_i++;
                    continue;
                }
            }
            // no stored entry for (i,j): explicit zero
            if(rv.type == MAT_INT)
                rv.vals.i[i*rv.cols + j] = 0;
            else
                rv.vals.f[i*rv.cols + j] = 0.0;
        }
        // leftover entries mean ja held a column index >= cols
        if(a_i != matrix.ia[i + 1]){
            rv.error = ERR_DIM_MISSMATCH;
            break;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
// Parallel CSR -> dense conversion: same row-major layout as the serial
// csr_to_mat_nothreading, parallelised over rows with OpenMP.  Rows are
// independent, so each thread writes disjoint slices of rv.vals.
mat_rv csr_to_mat(csr matrix, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(rv, matrix)
    {
        // per-thread copy of the type tag avoids re-reading shared state
        MAT_TYPE local_type = rv.type;
        #pragma omp for private(i)
        for(i = 0; i < rv.rows; ++i){
            // a_i walks this row's slice of ja/nnz
            int a_i = matrix.ia[i];
            for(int j = 0; j < rv.cols; ++j){
                if(a_i < matrix.ia[i + 1]){
                    if(matrix.ja[a_i] == j){
                        if(local_type == MAT_INT)
                            rv.vals.i[i*rv.cols + j] = matrix.nnz.i[a_i];
                        else
                            rv.vals.f[i*rv.cols + j] = matrix.nnz.f[a_i];
                        a_i++;
                        continue;
                    }
                }
                if(local_type == MAT_INT)
                    rv.vals.i[i*rv.cols + j] = 0;
                else
                    rv.vals.f[i*rv.cols + j] = 0.0;
            }
            // leftover entries mean ja held a column index >= cols.
            // NOTE(review): concurrent writes to rv.error are a data race
            // (all writers store the same code here) — confirm acceptable.
            if(a_i != matrix.ia[i + 1])
                rv.error = ERR_DIM_MISSMATCH;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
// Expand a CSC (compressed sparse column) matrix into a dense row-major
// mat_rv, serially.  i iterates columns and j rows, so the element for
// (row j, column i) is stored at j*cols + i — the same row-major layout
// the COO and CSR conversions produce.
mat_rv csc_to_mat_nothreading(csc matrix)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.rows = matrix.rows;
    rv.cols = matrix.cols;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    for(int i = 0; i < rv.cols; ++i){
        //iterator for nnz/ja
        int a_i = matrix.ia[i];
        for(int j = 0; j < rv.rows; ++j){
            if(a_i < matrix.ia[i + 1]){
                // next stored entry of this column matches row j
                if(matrix.ja[a_i] == j){
                    if(rv.type == MAT_INT)
                        rv.vals.i[j*rv.cols + i] = matrix.nnz.i[a_i];
                    else
                        rv.vals.f[j*rv.cols + i] = matrix.nnz.f[a_i];
                    a_i++;
                    continue;
                }
            }
            // no stored entry for (j,i): explicit zero
            if(rv.type == MAT_INT)
                rv.vals.i[j*rv.cols + i] = 0;
            else
                rv.vals.f[j*rv.cols + i] = 0.0;
        }
        // leftover entries mean ja held a row index >= rows
        if(a_i != matrix.ia[i + 1]){
            rv.error = ERR_DIM_MISSMATCH;
            break;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
// Parallel CSC -> dense conversion, parallelised over columns with OpenMP.
// Columns are independent, so each thread writes a disjoint set of cells.
//
// Fix: the dense element for (row j, column i) must be stored at
// j*rv.cols + i (row-major), matching csc_to_mat_nothreading() and the
// COO/CSR conversions.  The previous i*rv.rows + j indexing was
// column-major and produced a different matrix for non-square inputs.
mat_rv csc_to_mat(csc matrix, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    rv.error = ERR_NONE;
    rv.cols = matrix.cols;
    rv.rows = matrix.rows;
    rv.type = matrix.type;
    rv.isval = false;
    if(matrix.type == MAT_INT){
        if(!(rv.vals.i = (int*)calloc((rv.rows * rv.cols), sizeof(int)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(rv.vals.f = (long double*)calloc((rv.rows * rv.cols), sizeof(long double)))){
            fprintf(stderr,"Ran out of virtual memory while allocating mat_rv struct\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(rv, matrix)
    {
        // per-thread copy of the type tag avoids re-reading shared state
        MAT_TYPE local_type = rv.type;
        #pragma omp for private(i)
        for(i = 0; i < rv.cols; ++i){
            // a_i walks this column's slice of ja/nnz
            int a_i = matrix.ia[i];
            for(int j = 0; j < rv.rows; ++j){
                if(a_i < matrix.ia[i + 1]){
                    if(matrix.ja[a_i] == j){
                        if(local_type == MAT_INT)
                            rv.vals.i[j*rv.cols + i] = matrix.nnz.i[a_i];
                        else
                            rv.vals.f[j*rv.cols + i] = matrix.nnz.f[a_i];
                        a_i++;
                        continue;
                    }
                }
                if(local_type == MAT_INT)
                    rv.vals.i[j*rv.cols + i] = 0;
                else
                    rv.vals.f[j*rv.cols + i] = 0.0;
            }
            // leftover entries mean ja held a row index >= rows.
            // NOTE(review): concurrent writes to rv.error are a data race
            // (all writers store the same code here) — confirm acceptable.
            if(a_i != matrix.ia[i + 1])
                rv.error = ERR_DIM_MISSMATCH;
        }
    }
    get_utc_time(&end);
    rv.t_construct = time_delta(end, start);
    return rv;
}
|
row_wise_v2.c |
/****
Author: Rayhan Shikder,
email: shikderr@myumanitoba.ca
MSc Student,
Department of Computer Science,
University of Manitoba, Winnipeg, MB, Canada
****/
#include<stdio.h>
#include<string.h>
#include <stdlib.h>
#include<mpi.h>
#include<omp.h>
#include<time.h>
//macros
#define ALPHABET_LENGTH 4
#define max(x,y) ((x)>(y)?(x):(y))
//global variables
char *string_A;
char *string_B;
char *unique_chars_C; //unique alphabets
int c_len;
short *P_Matrix;
short **DP_Results; //to store the DP values
//function prototypes
int get_index_of_character(char *str,char x, int len);
void print_matrix(short **x, int row, int col);
void calc_P_matrix_v2(short *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size);
short lcs_yang_v2(short **DP, short *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size);
short lcs(short **DP, char *A, char *B, int m, int n);
int get_index_of_character(char *str,char x, int len)
{
    /* Linear scan of the first len characters of str for x.
       Returns the index of the first occurrence, or -1 when absent. */
    int idx = 0;
    while (idx < len)
    {
        if (str[idx] == x)
        {
            return idx;
        }
        ++idx;
    }
    return -1; /* x does not occur in str[0..len-1] */
}
void print_matrix(short **x, int row, int col)
{
    /* Dump a row x col matrix of shorts to stdout, one matrix row per
       output line, entries separated by a single space. */
    for (int r = 0; r < row; ++r)
    {
        for (int c = 0; c < col; ++c)
        {
            printf("%d ", x[r][c]);
        }
        printf("\n");
    }
}
// Distributed construction of the P table used by the Yang LCS algorithm.
// P has one row per character of c and len_b+1 columns; after this call,
// row i column j holds the 1-based position of the most recent occurrence
// of c[i] within b[0..j-1], or the carried-forward previous value when
// b[j-1] does not match (base value comes from column 0 of the scattered P).
// Rank 0 scatters row chunks of c and P to all ranks, each rank fills its
// chunk with OpenMP, and rank 0 gathers the results back.
// NOTE(review): chunk_size*num_procs is assumed to cover len_c exactly -
// any remainder rows are silently left uncomputed; confirm callers.
// NOTE(review): both receive buffers are stack VLAs; a very long b could
// overflow the stack.
// NOTE(review): column 0 of each P row is taken from the scattered P
// contents - the caller must provide zeroed storage for P.
void calc_P_matrix_v2(short *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size)
{
char receive_array_for_scatter_c[chunk_size];
short receive_array_for_scatter_p[chunk_size*(len_b+1)];
//Scatter the char array chunks by sending each process a particular chunk
MPI_Scatter(c, chunk_size, MPI_CHAR,&receive_array_for_scatter_c,chunk_size,MPI_CHAR, 0, MPI_COMM_WORLD);
//Scatter the P-matrix row chunks the same way
MPI_Scatter(P, chunk_size*(len_b+1), MPI_SHORT,&receive_array_for_scatter_p,chunk_size*(len_b+1),MPI_SHORT, 0, MPI_COMM_WORLD);
// Broadcast the whole b array to everybody
MPI_Bcast(b, len_b, MPI_CHAR, 0, MPI_COMM_WORLD);
// Rows of this rank's chunk are independent, so fill them in parallel.
#pragma omp parallel for
for(int i=0;i<chunk_size;i++)
{
// Each column either records the current position j (match) or carries
// the previous column's value forward (no match).
for(int j=1;j<len_b+1;j++)
{
if(b[j-1]==receive_array_for_scatter_c[i])
{
receive_array_for_scatter_p[(i*(len_b+1))+j] = j;
}
else
{
receive_array_for_scatter_p[(i*(len_b+1))+j] = receive_array_for_scatter_p[(i*(len_b+1))+j-1];
}
}
}
//now gather all the calculated values of P matrix in process 0
MPI_Gather(receive_array_for_scatter_p, chunk_size*(len_b+1), MPI_SHORT, P, chunk_size*(len_b+1), MPI_SHORT, 0, MPI_COMM_WORLD);
}
// Distributed Yang-style LCS DP over the rows of A.  P (u x (n+1)) comes
// from calc_P_matrix_v2; DP is (m+1) x (n+1) with row 0 and column 0 zeroed
// by the caller.  For each row i the columns are scattered across ranks,
// computed branch-free with OpenMP, then re-assembled on every rank with
// MPI_Allgather so the next row can start.  Every rank ends up holding the
// full DP and returns DP[m][n].
// NOTE(review): chunk_size*num_procs is assumed to equal n+1 exactly.
short lcs_yang_v2(short **DP, short *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size)
{
MPI_Bcast(P, (u*(n+1)), MPI_SHORT, 0, MPI_COMM_WORLD);
for(int i=1;i<m+1;i++)
{
// Row of P corresponding to the alphabet character A[i-1].
int c_i = get_index_of_character(C,A[i-1],u);
short dp_i_receive[chunk_size];
MPI_Scatter(DP[i], chunk_size, MPI_SHORT,&dp_i_receive,chunk_size,MPI_SHORT, 0, MPI_COMM_WORLD);
int start_id = (myrank * chunk_size);
int end_id = (myrank * chunk_size) + chunk_size;
int t,s;
#pragma omp parallel for private(t,s) schedule(static)
for(int j=start_id;j<end_id;j++)
{
// Column 0 is the DP base case, so rank 0 skips it.
// NOTE(review): mutating the loop variable of an 'omp for' loop is
// non-conforming OpenMP - confirm this behaves as intended here.
if(j==start_id && myrank==0)j=j+1;
// Branch-free select: t is 1 when P[c_i][j] > 0 (a match position
// exists); s compares DP[i-1][j] against the value at that match
// position.  The final expression picks either the carried value
// DP[i-1][j] or the extended value DP[i-1][P[c_i][j]-1] + 1.
t= (0-P[(c_i*(n+1))+j])<0;
s= (0 - (DP[i-1][j] - (t*DP[i-1][P[(c_i*(n+1))+j]-1]) ));
dp_i_receive[j-start_id] = ((t^1)||(s^0))*(DP[i-1][j]) + (!((t^1)||(s^0)))*(DP[i-1][P[(c_i*(n+1))+j]-1] + 1);
}
//gather the finished row on every rank so the next iteration can read it
MPI_Allgather(dp_i_receive, chunk_size, MPI_SHORT,DP[i], chunk_size, MPI_SHORT, MPI_COMM_WORLD);
}
return DP[m][n];
}
/*
 * Plain sequential dynamic-programming LCS of A (length m) and B (length n).
 * DP must be an (m+1) x (n+1) table whose row 0 and column 0 are zeroed by
 * the caller; on return the table is filled and DP[m][n] is the LCS length.
 *
 * Improvement: the file-level max() macro (which evaluates its arguments
 * twice) is replaced by an explicit comparison local to the function.
 */
short lcs(short **DP, char *A, char *B, int m, int n)
{
    for (int i = 1; i < (m + 1); i++)
    {
        for (int j = 1; j < (n + 1); j++)
        {
            if (A[i - 1] == B[j - 1])
            {
                /* Match: extend the LCS of both prefixes by one. */
                DP[i][j] = DP[i - 1][j - 1] + 1;
            }
            else
            {
                /* No match: keep the better of dropping one character
                   from either string (explicit compare, no macro). */
                short drop_a = DP[i - 1][j];
                short drop_b = DP[i][j - 1];
                DP[i][j] = (drop_a > drop_b) ? drop_a : drop_b;
            }
        }
    }
    return DP[m][n];
}
/*
 * Driver: reads lengths and the three strings (A, B, alphabet C) from the
 * input file named on the command line, runs the distributed Yang LCS, and
 * prints the result and timing on rank 0.
 *
 * Fixes over the original:
 *  - fopen() result is checked (fscanf previously dereferenced NULL on a
 *    missing file);
 *  - char buffers are sized with sizeof(char), not sizeof(char *);
 *  - P_Matrix is calloc'd: calc_P_matrix_v2 reads column 0 as the base
 *    case, which was previously uninitialized malloc memory;
 *  - the redundant re-zeroing of the calloc'd DP rows is dropped;
 *  - the input stream is closed and the DP rows/strings are freed;
 *  - unused start_time/stop_time locals removed.
 */
int main(int argc, char *argv[])
{
    if(argc <= 1){
        printf("Error: No input file specified! Please specify the input file, and run again!\n");
        return 0;
    }
    int my_rank;                       /* rank of this process */
    int num_procs;                     /* total number of MPI processes */
    int chunk_size_p, chunk_size_dp;   /* chunk sizes for the P and DP matrices */
    int res;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    FILE *fp;
    int len_a, len_b;
    double start_time_yang, stop_time_yang;
    if(my_rank == 0)printf("\nYour input file: %s \n",argv[1]);
    fp = fopen(argv[1], "r");
    if(fp == NULL){
        /* fix: the original passed a NULL stream straight to fscanf */
        fprintf(stderr, "Error: could not open input file %s\n", argv[1]);
        MPI_Finalize();
        return 1;
    }
    fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len);
    /* fix: allocate character buffers with sizeof(char), not sizeof(char *) */
    string_A = (char *)malloc((len_a+1) * sizeof(char));
    string_B = (char *)malloc((len_b+1) * sizeof(char));
    unique_chars_C = (char *)malloc((c_len+1) * sizeof(char));
    fscanf(fp, "%s %s %s", string_A, string_B, unique_chars_C);
    fclose(fp); /* fix: stream was never closed */
    chunk_size_p = (c_len/num_procs);
    chunk_size_dp = ((len_b+1)/num_procs);
    if(my_rank==0)
    {
        printf("chunk_p: %d chunk_dp: %d procs: %d\n",chunk_size_p,chunk_size_dp,num_procs);
    }
    DP_Results = (short **)malloc((len_a+1) * sizeof(short *));
    for(int k=0;k<len_a+1;k++)
    {
        /* calloc zero-fills, so DP row 0 / column 0 start out at 0 */
        DP_Results[k] = (short *)calloc((len_b+1), sizeof(short));
    }
    /* fix: calloc instead of malloc - column 0 of each P row is read as the
       DP base case inside calc_P_matrix_v2 */
    P_Matrix = (short *)calloc((c_len*(len_b+1)), sizeof(short));
    start_time_yang = MPI_Wtime();
    calc_P_matrix_v2(P_Matrix,string_B,len_b,unique_chars_C,c_len, my_rank, chunk_size_p);
    res = lcs_yang_v2(DP_Results,P_Matrix,string_A,string_B,unique_chars_C,len_a,len_b,c_len,my_rank, chunk_size_dp);
    stop_time_yang = MPI_Wtime();
    if(my_rank == 0)
    {
        printf("lcs_yang_v2 is: %d\n",res);
        printf("time taken for lcs_yang_v2 is: %lf\n",stop_time_yang-start_time_yang);
    }
    /* deallocate pointers (fix: each DP row and the strings were leaked) */
    for(int k=0;k<len_a+1;k++)
    {
        free(DP_Results[k]);
    }
    free(DP_Results);
    free(P_Matrix);
    free(string_A);
    free(string_B);
    free(unique_chars_C);
    // Shutdown MPI (important - don't forget!)
    MPI_Finalize();
    return 0;
}
|
database_chunk.h | /*
* database_chunk.h
*
* Created on: 2012/11/08
* Author: shu
*/
#ifndef DATABASE_CHUNK_H_
#define DATABASE_CHUNK_H_
#include <fstream>
#include <string>
#include <stdint.h>
#include <tr1/memory>
#include "alphabet_coder.h"
#include "seed_searcher.h"
// One chunk of a sequence database.  A chunk is either built in memory from
// coded sequences (Build) or attached to files saved earlier under a common
// filename prefix, in which case the heavyweight parts (names, offsets,
// concatenated sequence, seed-searcher parameters) are loaded lazily on
// first access, guarded by named OpenMP critical sections.
class DatabaseChunk {
public:
DatabaseChunk();
// Build a chunk directly from already-read sequences (see Build below).
DatabaseChunk(std::vector<Sequence> &sequences, AlphabetCoder &coder,
AlphabetCoder::Code sequence_delimiter,
SeedSearcherDatabaseParameters::BuildParameters &seed_search_parameters_build_parameters);
// Attach to a previously saved chunk; parts load lazily from <filename_prefix>.* files.
DatabaseChunk(std::string filename_prefix);
// Encode `sequences` with `coder` and build all in-memory structures.
bool Build(std::vector<Sequence> &sequences, AlphabetCoder &coder,
AlphabetCoder::Code sequence_delimiter,
SeedSearcherDatabaseParameters::BuildParameters &seed_search_parameters_build_parameters);
// Release the chunk's data.
bool Clear();
uint32_t GetNumberSequences() {
return number_sequences_;
}
uint32_t GetConcatenatedSequenceLength() {
return concatenated_sequences_length_;
}
// Name of sequence `id`.  The first caller loads the .nam file; the named
// critical section serializes the lazy load under OpenMP threads.
// NOTE(review): names_[id] is read outside the critical section - safe
// only if names_ is never mutated again once loaded; confirm in the .cpp.
std::string GetName(uint32_t id) {
#pragma omp critical(load_lock_for_names)
{
if (names_.size() == 0) {
LoadNames(filename_prefix_);
}
}
return names_[id];
}
// Offset entry for sequence `id` (lazy-loaded from the .off file).
uint32_t GetOffsets(uint32_t id) {
#pragma omp critical(load_lock_for_offsets)
{
if (offsets_.size() == 0) {
LoadOffsets(filename_prefix_);
}
}
return offsets_[id];
}
// Pointer to the coded, concatenated sequence data (lazy-loaded from .seq).
AlphabetCoder::Code *GetConcatenatedSequence() {
#pragma omp critical(load_lock_for_concatenated_sequence)
{
if (concatenated_sequence_.size() == 0) {
LoadConcatenatedSequence(filename_prefix_);
}
}
return &concatenated_sequence_[0];
}
// Seed-searcher parameters for this chunk (lazy-loaded, flag-guarded).
SeedSearcherDatabaseParameters &GetSeedSearcherParameters() {
#pragma omp critical(load_lock_for_seed_searcher_parameters)
{
if (!setted_seed_searcher_parameters_) {
LoadSeedSearcherParameters(filename_prefix_);
}
}
return seed_searcher_parameters_;
}
// Id lookup by position in the concatenated sequence (defined in the .cpp).
uint32_t GetId(uint32_t position);
bool Load(std::string filename_prefix);
bool Save(std::string filename_prefix);
private:
// File naming scheme: each chunk component lives in its own file,
// distinguished by extension under the shared prefix.
static std::string GetInformationFileName(std::string filename_prefix) {
return filename_prefix + ".inf";
}
static std::string GetOffsetsFileName(std::string filename_prefix) {
return filename_prefix + ".off";
}
static std::string GetNamesFileName(std::string filename_prefix) {
return filename_prefix + ".nam";
}
static std::string GetSequencesFileName(std::string filename_prefix) {
return filename_prefix + ".seq";
}
static std::string GetIndexFileName(std::string filename_prefix) {
return filename_prefix + ".src";
}
void EncodeSequences(AlphabetCoder &coder, std::vector<Sequence> &sequences,
AlphabetCoder::Code sequence_delimiter);
bool LoadInfomation(std::string filename_prefix);
bool LoadOffsets(std::string filename_prefix);
bool LoadNames(std::string filename_prefix);
bool LoadConcatenatedSequence(std::string filename_prefix);
bool LoadSeedSearcherParameters(std::string filename_prefix);
bool SaveInfomation(std::string filename_prefix);
bool SaveOffsets(std::string filename_prefix);
bool SaveNames(std::string filename_prefix);
bool SaveConcatenatedSequence(std::string filename_prefix);
bool SaveSeedSearcherParameters(std::string filename_prefix);
bool building_;                  // presumably true while built in memory - confirm
std::string filename_prefix_;    // prefix for the on-disk component files
uint32_t number_sequences_;
uint32_t concatenated_sequences_length_;
std::vector<std::string> names_;     // per-sequence names (lazy)
std::vector<uint32_t> offsets_;      // per-sequence offsets (lazy)
std::vector<AlphabetCoder::Code> concatenated_sequence_;  // coded data (lazy)
bool setted_seed_searcher_parameters_;  // guards lazy load of the field below
SeedSearcherDatabaseParameters seed_searcher_parameters_;
};
#endif /* DATABASE_CHUNK_H_ */
|
spinless_fermion_basis_core.h | #ifndef _SPINLESS_FERMION_BASIS_OP_H
#define _SPINLESS_FERMION_BASIS_OP_H
#include <complex>
#include "hcb_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "openmp.h"
namespace basis_general {
template<class I>
void mergeSort(I nums[],I work[],const I left,const I mid,const I right, bool &f_count){
I leftLength = mid - left + 1;
I rightLength = right - mid;
I * lAr = work;
I * rAr = work+leftLength;
for (I i = 0; i < leftLength; i++) {
lAr[i] = nums[left + i];
}
for (I i = 0; i < rightLength; i++) {
rAr[i] = nums[mid + 1 + i];
}
I i = 0, j = 0, k = left;
while (i < leftLength && j < rightLength) {
if (lAr[i] >= rAr[j]) {
nums[k] = lAr[i];
if(j&1){f_count ^= 1;}
i++;
} else {
nums[k] = rAr[j];
j++;
}
k++;
}
//remaining iversions
if((j&1) && ((leftLength-i)&1)){f_count ^= 1;}
if (i >= leftLength) {
//copy remaining elements from right
for (; j < rightLength; j++, k++) {
nums[k] = rAr[j];
}
} else {
//copy remaining elements from left
for (; i < leftLength; i++, k++) {
nums[k] = lAr[i];
}
}
}
// Inversion-parity merge sort driver: recursively sorts nums[left..right]
// into descending order using `work` as scratch, XOR-accumulating the
// parity of the inversions into f_count.
template<class I>
void getf_count(I nums[],I work[], I left, I right, bool &f_count){
    // Ranges of length <= 1 are already sorted and contain no inversions.
    if (left >= right) {
        return;
    }
    I middle = (I)((int)left + (int)right) / 2;
    getf_count(nums, work, left, middle, f_count);
    getf_count(nums, work, (I)(middle + 1), right, f_count);
    mergeSort(nums, work, left, middle, right, f_count);
}
// Apply a site-permutation symmetry map to the occupation bit string s of
// an N-site system, returning the mapped state and multiplying `sign` by
// the fermionic parity picked up when the occupied sites are reordered.
// Map convention as implemented below: map[i] = j >= 0 sends site i to
// site j keeping the occupation bit; a negative entry sends site i to
// site -(j+1) and stores the complemented bit.
// NOTE(review): the physical convention behind negative entries and the
// extra f_count flip for odd i is inferred from the bit manipulation -
// confirm against the basis documentation.
template<class I,class P>
I inline spinless_fermion_map_bits(I s,const int map[],const int N,P &sign){
I ss = 0;   // mapped state being assembled
int np = 0; // number of occupied sites seen so far
int pos_list[bit_info<I>::bits]; // mapped positions of occupied sites
int work[bit_info<I>::bits];     // scratch for the inversion-parity sort
bool f_count = 0; // running sign parity
for(int i=N-1;i>=0;--i){
int j = map[i];
I n = (s&1); // occupation of site i (sites are stored at bit N-i-1)
bool neg = j<0;
if(n){
pos_list[np++] = ( neg ? -(j+1) : j);
f_count ^= (neg&&(i&1)); // extra parity flip for negative maps on odd sites
}
// Place the (possibly complemented) bit at its mapped position.
ss ^= ( neg ? (n^1)<<(N+j) : n<<(N-j-1) );
s >>= 1;
}
// Parity of the permutation that sorts the occupied-site positions gives
// the fermionic sign of the reordering.
getf_count(pos_list,work,0,np-1,f_count);
if(f_count){sign *= -1;}
return ss;
}
template<class I,class P>
void get_map_sign(I s,I inv,P &sign){
typename bit_info<I>::bit_index_type pos_list[bit_info<I>::bits];
bool f_count = 0;
I ne = bit_count(bit_info<I>::eob&s,bit_info<I>::bits-1); // count number of partices on odd sites
f_count ^= (ne&1);
typename bit_info<I>::bit_index_type n = bit_pos(s,pos_list) - 1; // get bit positions
getf_count(pos_list,(typename bit_info<I>::bit_index_type)0,n,f_count);
if(f_count){sign *= -1;}
}
// Basis core for spinless fermions: reuses the hard-core-boson machinery
// (hcb_basis_core) for state storage/enumeration and layers the fermionic
// sign bookkeeping on top of it.
// NOTE(review): the constructors' mem-initializers name hcb_basis_core<I>,
// i.e. hcb_basis_core<I,signed char>; that matches the declared base
// hcb_basis_core<I,P> only for the default P - confirm for other P.
template<class I,class P=signed char>
class spinless_fermion_basis_core : public hcb_basis_core<I,P>
{
public:
spinless_fermion_basis_core(const int _N) : \
hcb_basis_core<I>::hcb_basis_core(_N,true) { }
spinless_fermion_basis_core(const int _N,const int _nt,const int _maps[], \
const int _pers[], const int _qs[]) : \
hcb_basis_core<I>::hcb_basis_core(_N,_nt,_maps,_pers,_qs,true) {}
~spinless_fermion_basis_core(){}
// Older benes-network implementation, kept for reference:
// I map_state(I s,int n_map,int &sign){
// if(general_basis_core<I,P>::nt<=0){
// return s;
// }
// get_map_sign<I>(s,hcb_basis_core<I>::invs[n_map],sign);
// return benes_bwd(&hcb_basis_core<I>::benes_maps[n_map],s^hcb_basis_core<I>::invs[n_map]);;
// }
// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
// if(general_basis_core<I,P>::nt<=0){
// return;
// }
// const tr_benes<I> * benes_map = &hcb_basis_core<I>::benes_maps[n_map];
// const I inv = hcb_basis_core<I>::invs[n_map];
// #pragma omp for schedule(static,1)
// for(npy_intp i=0;i<M;i++){
// int temp_sign = sign[i];
// get_map_sign<I>(s[i],inv,temp_sign);
// s[i] = benes_bwd(benes_map,s[i]^inv);
// sign[i] = temp_sign;
// }
// }
// Apply symmetry map n_map to state s, folding the fermionic sign of the
// site reordering into `sign`.  With no transformations (nt<=0) the state
// is returned unchanged.
I map_state(I s,int n_map,P &sign){
if(general_basis_core<I,P>::nt<=0){
return s;
}
const int n = general_basis_core<I,P>::N;
return spinless_fermion_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n,sign);
}
// Batched version: maps M states in place.  Uses 'omp for' (not
// 'parallel for'), so it expects to run inside an enclosing parallel region.
void map_state(I s[],npy_intp M,int n_map,P sign[]){
if(general_basis_core<I,P>::nt<=0){
return;
}
const int n = general_basis_core<I,P>::N;
const int * map = &general_basis_core<I,P>::maps[n_map*n];
#pragma omp for schedule(static)
for(npy_intp i=0;i<M;i++){
s[i] = spinless_fermion_map_bits(s[i],map,n,sign[i]);
}
}
// Apply the operator string opstr (processed right-to-left) at sites indx[]
// to state r, accumulating the matrix element in m.  Supported operators:
// 'z' (+-1/2), 'n' (occupation), '+'/'-' (create/annihilate with fermionic
// sign), 'I' (identity).  Returns 0 on success, -1 on an unknown operator.
// On a vanishing matrix element r is restored to the input state.
int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
const I s = r;
const I one = 1;
for(int j=n_op-1;j>-1;j--){
const int ind = general_basis_core<I,P>::N-indx[j]-1;
// Fermionic string sign: parity of the occupied sites counted by
// bit_count relative to bit `ind` (per bit_count's convention - confirm).
I f_count = bit_count(r,ind);
double sign = ((f_count&1)?-1:1);
const I b = (one << ind);
const bool a = (bool)((r >> ind)&one);
const char op = opstr[j];
switch(op){
case 'z':
m *= (a?0.5:-0.5);
break;
case 'n':
m *= (a?1:0);
break;
case '+':
m *= (a?0:sign); // creation: zero on an occupied site
r ^= b;
break;
case '-':
m *= (a?sign:0); // annihilation: zero on an empty site
r ^= b;
break;
case 'I':
break;
default:
return -1;
}
if(m.real()==0 && m.imag()==0){
r = s; // element vanished: restore the input state
break;
}
}
return 0;
}
};
}
#endif
|
parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd foo
void test_no_clause() {
// Baseline: a bare '#pragma omp parallel for simd' is accepted before a
// for loop but rejected before any other statement.
int i;
#pragma omp parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}}
#pragma omp parallel for simd
++i;
}
void test_branch_protected_scope() {
// Jumps (goto/return) may not enter or leave the protected scope of the
// OpenMP region; jumps wholly inside it are fine.
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
// Unknown clause-like tokens after the directive are ignored with a warning.
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
// Stray punctuation after the directive (or after a valid clause) is
// ignored with an "extra tokens" warning rather than a hard error.
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_safelen() {
// Parsing and semantic checks of the 'safelen' clause: malformed
// parentheses/argument lists, non-constant expressions, and non-positive
// values are all diagnosed; a single positive integer constant is accepted.
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen() {
// Same battery of malformed/invalid argument checks as test_safelen,
// applied to the 'simdlen' clause.
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a positive integer value}}
#pragma omp parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a positive integer value}}
#pragma omp parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a positive integer value}}
#pragma omp parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
// Cross-clause check: simdlen must not exceed safelen, in either order.
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
// 'collapse' clause checks: malformed argument lists, non-constant or
// non-positive arguments, requiring as many nested for loops as the
// collapse count, and rejecting OpenMP constructs nested in a simd region.
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
// 'linear' clause checks: malformed lists, undeclared names, optional
// ':step' syntax, zero-step warning, and mutual exclusion with private/
// lastprivate and with a second linear clause on the same variable.
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp parallel for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp parallel for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp parallel for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp parallel for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp parallel for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp parallel for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
// 'aligned' clause checks: malformed lists, undeclared names, optional
// ':alignment' syntax, the array-or-pointer requirement, and the rule
// that a variable appears in at most one aligned clause.
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
// Diagnostic tests for the 'private' clause of '#pragma omp parallel for simd'.
// NOTE: the expected-error/expected-note comments are clang -verify directives;
// their '@+N' offsets are line-relative, so never insert lines between a
// directive and the pragma it annotates.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z; // well-formed uses of 'private' below: no diagnostics expected
#pragma omp parallel
#pragma omp parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Diagnostic tests for the 'lastprivate' clause of
// '#pragma omp parallel for simd' (clang -verify directives below are
// line-relative; keep each directive adjacent to its pragma).
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z; // well-formed uses below: no diagnostics expected
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Diagnostic tests for the 'firstprivate' clause of
// '#pragma omp parallel for simd' (clang -verify directives below are
// line-relative; keep each directive adjacent to its pragma). The valid
// cases also combine 'lastprivate' and 'firstprivate' on the same variable,
// which is allowed.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z; // well-formed uses below: no diagnostics expected
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// The canonical loop form of an OpenMP worksharing loop requires an integer
// or pointer induction variable; float/double loop variables must be
// rejected (clang -verify directives below are line-relative).
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
|
data_gen.c | /*
* Copyright (C) 2014-2015, 2018 Intel Corporation
*
* SPDX-License-Identifier: MIT
*/
#define _XOPEN_SOURCE
#define _BSD_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <ia32intrin.h>
#include "euro_opt.h"
/* Draw one value uniformly distributed over [a, b) from the caller's
 * re-entrant drand48 stream. */
tfloat RandRange( tfloat a, tfloat b, struct drand48_data *seed ) {
    double u = 0.0;
    drand48_r(seed, &u);          /* u is uniform in [0, 1) */
    const double span = b - a;
    return u*span + a;
}
/*
// This function allocates arrays to hold input and output parameters
// for the Black-Scholes formula.
// nopt - length of arrays
// Random input parameters
// s0 - initial price
// x - strike price
// t - maturity
// Output arrays for call and put prices
// vcall_compiler, vcall_mkl
// vput_compiler, vput_mkl
*/
/*
// Allocates the seven ALIGN_FACTOR-aligned arrays used by the Black-Scholes
// kernels and fills them:
//   s0, x, t        - random inputs (initial price, strike, maturity) drawn
//                     uniformly from [S0L,S0H), [XL,XH), [TL,TH)
//   vcall_*, vput_* - output arrays, zero-initialized
// Initialization runs inside an OpenMP parallel region so each thread
// first-touches the pages it will later work on (NUMA-friendly); every
// thread seeds its own drand48 stream from its thread id, keeping runs
// reproducible. On any allocation failure a message is printed and the
// process exits. Ownership of all buffers passes to the caller; release
// them with FreeData.
*/
void InitData( int nopt, tfloat* *s0, tfloat* *x, tfloat* *t,
               tfloat* *vcall_compiler, tfloat* *vput_compiler,
               tfloat* *vcall_mkl, tfloat* *vput_mkl
             )
{
    tfloat *ts0, *tx, *tt, *tvcall_compiler, *tvput_compiler, *tvcall_mkl, *tvput_mkl;

    /* Allocate aligned memory */
    ts0             = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tx              = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tt              = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvcall_compiler = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvput_compiler  = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvcall_mkl      = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvput_mkl       = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);

    if ( (ts0 == NULL) || (tx == NULL) || (tt == NULL) ||
         (tvcall_compiler == NULL) || (tvput_compiler == NULL) ||
         (tvcall_mkl == NULL) || (tvput_mkl == NULL) )
    {
        printf("Memory allocation failure\n");
        exit(-1);
    }

    /* NUMA-friendly data init */
    #pragma omp parallel
    {
        /* Per-thread RNG stream: distinct but reproducible seeds. */
        struct drand48_data seed;
        srand48_r(omp_get_thread_num()+SEED, &seed);

        /* The loop index is declared in the for statement so it is
           unambiguously private to this construct (previously it was a
           function-scope variable shared by the whole parallel region). */
        #pragma omp for simd
        for ( int i = 0; i < nopt; i++ )
        {
            ts0[i] = RandRange( S0L, S0H, &seed );
            tx[i]  = RandRange( XL, XH, &seed );
            tt[i]  = RandRange( TL, TH, &seed );

            tvcall_compiler[i] = 0.0;
            tvput_compiler[i]  = 0.0;
            tvcall_mkl[i]      = 0.0;
            tvput_mkl[i]       = 0.0;
        }
    }

    *s0 = ts0;
    *x  = tx;
    *t  = tt;
    *vcall_compiler = tvcall_compiler;
    *vput_compiler  = tvput_compiler;
    *vcall_mkl      = tvcall_mkl;
    *vput_mkl       = tvput_mkl;
}
/* Deallocate arrays */
/* Deallocate the seven arrays produced by InitData. Every buffer was
 * obtained from _mm_malloc, so each must be released with _mm_free. */
void FreeData( tfloat *s0, tfloat *x, tfloat *t,
               tfloat *vcall_compiler, tfloat *vput_compiler,
               tfloat *vcall_mkl, tfloat *vput_mkl
             )
{
    tfloat *buffers[] = { s0, x, t, vcall_compiler, vput_compiler,
                          vcall_mkl, vput_mkl };
    size_t n;

    for ( n = 0; n < sizeof(buffers)/sizeof(buffers[0]); n++ )
    {
        _mm_free( buffers[n] );
    }
}
|
taskdep1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// two tasks with depend clause to ensure execution order
// i is shared for two tasks based on implicit data-sharing attribute rules.
#include <assert.h>
int main()
{
  int i=0;
#pragma omp parallel
#pragma omp single
  {
    /* 'out:i' makes this task a predecessor of any later sibling task
       that names i in an 'in' (or 'out'/'inout') dependence. */
    #pragma omp task depend (out:i)
    i = 1;
    /* 'in:i' forces this task to wait for the task above, so the two
       writes are ordered (i = 1 then i = 2) and there is no data race. */
    #pragma omp task depend (in:i)
    i = 2;
  }
  /* Both tasks are complete here: the implicit barrier at the end of the
     single/parallel region guarantees child-task completion. */
  assert (i==2);
  return 0;
}
|
multiply.h | #pragma once
#include "intgemm/intgemm_config.h"
#include "interleave.h"
#include "intrinsics.h"
#include "vec_traits.h"
#include "callbacks.h"
namespace intgemm {
// Final cross-lane reduction step after Pack0123. For SSE2 the two 128-bit
// inputs already hold the eight per-column totals, so just pair them up.
INTGEMM_SSE2 static inline dvector_t<CPUType::SSE2, int> PermuteSummer(__m128i pack0123, __m128i pack4567) {
  // No op for 128 bits: already reduced fully.
  return { pack0123, pack4567 };
}
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX2
// AVX2 final reduction: each input carries four sums in its low 128-bit lane
// and four in its high lane ('f' = first/low, 's' = second/high below);
// one lane swap plus one blend lines them up so a single add produces the
// eight 32-bit totals in order.
INTGEMM_AVX2 static inline __m256i PermuteSummer(__m256i pack0123, __m256i pack4567) {
  // This instruction generates 1s 2s 3s 4s 5f 6f 7f 8f
  __m256i rev = _mm256_permute2f128_si256(pack0123, pack4567, 0x21);
  // This instruction generates 1f 2f 3f 4f 5s 6s 7s 8s
  __m256i blended = _mm256_blend_epi32(pack0123, pack4567, 0xf0);
  return _mm256_add_epi32(rev, blended);
}
#endif
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
// AVX512 final reduction: interleave the 128-bit quarters of the two inputs,
// add, then fold the upper 256 bits onto the lower so the eight 32-bit
// totals land in a single __m256i.
INTGEMM_AVX512BW static inline __m256i PermuteSummer(__m512i pack0123, __m512i pack4567) {
  // Form [0th 128-bit register of pack0123, 0th 128-bit register of pack4567, 2nd 128-bit register of pack0123, 2nd 128-bit register of pack4567]
  __m512i mix0 = _mm512_mask_permutex_epi64(pack0123, 0xcc, pack4567, (0 << 4) | (1 << 6));
  // Form [1st 128-bit register of pack0123, 1st 128-bit register of pack4567, 3rd 128-bit register of pack0123, 3rd 128-bit register of pack4567]
  __m512i mix1 = _mm512_mask_permutex_epi64(pack4567, 0x33, pack0123, 2 | (3 << 2));
  __m512i added = _mm512_add_epi32(mix0, mix1);
  // Now we have 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7.
  // Fold register over itself.
  return _mm256_add_epi32(_mm512_castsi512_si256(added), _mm512_extracti64x4_epi64(added, 1));
}
#endif
// Portable OpenMP pragma spelling: MSVC only understands its __pragma
// extension, while other compilers take the standard _Pragma operator.
#ifdef _MSC_VER
#define INTGEMM_OMP_FOR __pragma(omp for)
#define INTGEMM_OMP_PARALLEL __pragma(omp parallel)
#else
#define INTGEMM_OMP_FOR _Pragma("omp for")
#define INTGEMM_OMP_PARALLEL _Pragma("omp parallel")
#endif
// Quantize function used for SSSE3 and AVX2.
// Separate function for thread to work around gcc 7 bug that doesn't imbue
// target attributes across #pragma omp parallel.
// Expands to the per-thread worker: an orphaned 'omp for' splits the
// register-aligned range [0, count) among the threads of the enclosing
// parallel region (or runs serially if there is none). The caller is
// responsible for any tail that does not fill a whole Register.
#define INTGEMM_QUANTIZE_THREAD(target) \
target static void QuantizeThread(const float *input, int8_t *output, float quant_mult, std::size_t count) { \
  FRegister q = set1_ps<FRegister>(quant_mult); \
  INTGEMM_OMP_FOR \
  for (std::size_t i = 0; i < count; i += sizeof(Register)) { \
    *reinterpret_cast<Register*>(output + i) = QuantizeTile8::Consecutive(q, input + i); \
  } \
}
// Expands to the Quantize entry point: quantize the register-aligned prefix
// of `size` floats in parallel via QuantizeThread, then handle the remaining
// `overhang` elements by building one more full Register from (possibly
// repeated) tail pointers and memcpy'ing only the valid bytes.
#define INTGEMM_QUANTIZE(target) \
target static void Quantize(const float *const input, int8_t *const output, float quant_mult, Index size) { \
  assert(reinterpret_cast<uintptr_t>(input) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(output) % sizeof(Register) == 0); \
  const std::size_t kBatch = sizeof(Register); \
  const std::size_t fast_end = size & ~(kBatch - 1); \
  INTGEMM_OMP_PARALLEL \
  { \
    QuantizeThread(input, output, quant_mult, fast_end); \
  } \
  std::size_t overhang = size & (kBatch - 1); \
  if (!overhang) return; \
  FRegister q = set1_ps<FRegister>(quant_mult); \
  /* Each does size(Register) / 32 == kBatch / 4 floats at a time.
   * If we're allowed to read one of them, then we can read the whole register. */ \
  const float *inputs[4]; \
  std::size_t i; \
  for (i = 0; i < (overhang + (kBatch / 4) - 1) / (kBatch / 4); ++i) { \
    inputs[i] = &input[fast_end + i * (kBatch / 4)]; \
  } \
  /* These will be clipped off. */ \
  for (; i < 4; ++i) { \
    inputs[i] = &input[fast_end]; \
  } \
  Register result = QuantizeTile8::Tile(q, inputs[0], inputs[1], inputs[2], inputs[3]); \
  std::memcpy(output + (size & ~(kBatch - 1)), &result, overhang); \
}
/* Take 4 registers with 32-bit values to be horizontally added. Reduce them
* to one register with 32-bit values in the pattern 1 2 3 4 1 2 3 4, leaving
* the final addition (which crosses 128-bit lanes) to the caller.
*/
#define INTGEMM_PACK0123(target, Register) \
target inline Register Pack0123(Register sum0, Register sum1, Register sum2, Register sum3) { \
Interleave32(sum0, sum1); \
Register pack01 = add_epi32(sum0, sum1); \
Interleave32(sum2, sum3); \
Register pack23 = add_epi32(sum2, sum3); \
Interleave64(pack01, pack23); \
return add_epi32(pack01, pack23); \
} \
INTGEMM_PACK0123(INTGEMM_SSE2, __m128i)
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX2
INTGEMM_PACK0123(INTGEMM_AVX2, __m256i)
#endif
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_PACK0123(INTGEMM_AVX512BW, __m512i)
#endif
// Deliver an 8-column result block to the callback. The SSE2 total is a
// pair of 128-bit vectors (4 ints each), so the callback runs twice:
// columns [col_idx, col_idx+4) then [col_idx+4, col_idx+8).
template <typename Callback>
INTGEMM_SSE2 static inline void RunCallback(Callback& callback_impl, dvector_t<CPUType::SSE2, int> total, Index row_idx, Index col_idx, Index rows, Index cols) {
  callback_impl.Run(total.first, callbacks::OutputBufferInfo(row_idx, col_idx, rows, cols));
  callback_impl.Run(total.second, callbacks::OutputBufferInfo(row_idx, col_idx + 4, rows, cols));
}
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX2
// AVX2 variant: all eight 32-bit results fit in one 256-bit vector, so a
// single callback invocation covers the whole 8-column block.
template <typename Callback>
INTGEMM_AVX2 static inline void RunCallback(Callback& callback_impl, vector_t<CPUType::AVX2, int> total, Index row_idx, Index col_idx, Index rows, Index cols) {
  callback_impl.Run(total, callbacks::OutputBufferInfo(row_idx, col_idx, rows, cols));
}
#endif
// 16-bit multiplier for INTGEMM_SSE2, INTGEMM_AVX2, and AVX512.
// C = A * B * unquant_mult
//
// This has been substantially revised from Jacob Devlin's SSE code which is:
// Copyright (c) 2017 Microsoft Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// A is a row-major quantized matrix (from PrepareA)
// B is a rearranged quantized matrix (from PrepareB)
// C is output in row-major form.
//
// All of A, B, and C must be in aligned to a multiple of the register size:
// INTGEMM_SSE2: 16 bytes
// INTGEMM_AVX2: 32 bytes
// AVX512: 64 bytes.
//
// A_rows can be anything non-negative.
// width must be a multiple of the register size.
// B_cols must be a multiple of 8.
// Multiply16
// Expands to the 16-bit Multiply kernel for the given register type / ISA.
// Work is distributed over blocks of 8 B columns by an orphaned 'omp for':
// it parallelizes when invoked inside an OpenMP parallel region and runs
// serially otherwise. See the comment block above for the data layout and
// alignment contract.
#define INTGEMM_MULTIPLY16(Register, target, cpu_type) \
template <typename Callback> target static void Multiply(const int16_t *A, const int16_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int16_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const Index simd_width = width / (sizeof(Register) / sizeof(int16_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  INTGEMM_OMP_FOR \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /* Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width); \
      /* These will be packed 32-bit integers containing sums for each row of B multiplied by the row of A. \
         Iterate over shared (inner) dimension.*/ \
      Index k = 0; \
      Register a = *(A_row + k); \
      Register sum0 = madd_epi16(a, *(B0_col + k * 8)); \
      Register sum1 = madd_epi16(a, *(B0_col + k * 8 + 1)); \
      Register sum2 = madd_epi16(a, *(B0_col + k * 8 + 2)); \
      Register sum3 = madd_epi16(a, *(B0_col + k * 8 + 3)); \
      Register sum4 = madd_epi16(a, *(B0_col + k * 8 + 4)); \
      Register sum5 = madd_epi16(a, *(B0_col + k * 8 + 5)); \
      Register sum6 = madd_epi16(a, *(B0_col + k * 8 + 6)); \
      Register sum7 = madd_epi16(a, *(B0_col + k * 8 + 7)); \
      for (k = 1; k < simd_width; ++k) { \
        a = *(A_row + k); \
        /* Multiply 16-bit, horizontally add to packed 32-bit integers.*/ \
        Register mult0 = madd_epi16(a, *(B0_col + k * 8)); \
        Register mult1 = madd_epi16(a, *(B0_col + k * 8 + 1)); \
        Register mult2 = madd_epi16(a, *(B0_col + k * 8 + 2)); \
        Register mult3 = madd_epi16(a, *(B0_col + k * 8 + 3)); \
        Register mult4 = madd_epi16(a, *(B0_col + k * 8 + 4)); \
        Register mult5 = madd_epi16(a, *(B0_col + k * 8 + 5)); \
        Register mult6 = madd_epi16(a, *(B0_col + k * 8 + 6)); \
        Register mult7 = madd_epi16(a, *(B0_col + k * 8 + 7)); \
        /* Sum packed 32-bit integers with danger of overflow. TODO: accumulate in 64-bit every so often.*/ \
        sum0 = add_epi32(sum0, mult0); \
        sum1 = add_epi32(sum1, mult1); \
        sum2 = add_epi32(sum2, mult2); \
        sum3 = add_epi32(sum3, mult3); \
        sum4 = add_epi32(sum4, mult4); \
        sum5 = add_epi32(sum5, mult5); \
        sum6 = add_epi32(sum6, mult6); \
        sum7 = add_epi32(sum7, mult7); \
      } \
      /* Reduce sums within 128-bit lanes.*/ \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      /*The specific implementation may need to reduce further.*/ \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
} \

//An int8_prepbias version of the above code, using the add 127 technique
// Expands to PrepareBias: multiplies a virtual A row of all ones (a = 1)
// against the prepared B, i.e. computes per-column sums of B, and delivers
// the 8-column totals to the callback as row 0 of a 1-row result. Used by
// the shifted 8-bit path to fold the unsigned-shift correction into the
// bias.
#define INTGEMM_PREPAREBIASFOR8(Register, target, cpu_type) \
template <class Callback> target static void PrepareBias(const int8_t *B, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int8_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const Index simd_width = width / (sizeof(Register) / sizeof(int8_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  const Register a = set1_epi8<Register>(1); \
  INTGEMM_OMP_FOR \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /*const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width);*/ \
    /* These will be packed 16-bit integers containing sums for each row of B multiplied by the row of A. \
       Iterate over shared (inner) dimension.*/ \
    Index k = 0; \
    Register sum0 = maddubs_epi16(a, *(B0_col + k * 8)); \
    Register sum1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
    Register sum2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
    Register sum3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
    Register sum4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
    Register sum5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
    Register sum6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
    Register sum7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
    /* Upcast to 32-bit and horizontally add. Seems a bit faster if this is declared here.*/ \
    Register ones = set1_epi16<Register>(1); \
    sum0 = madd_epi16(sum0, ones); \
    sum1 = madd_epi16(sum1, ones); \
    sum2 = madd_epi16(sum2, ones); \
    sum3 = madd_epi16(sum3, ones); \
    sum4 = madd_epi16(sum4, ones); \
    sum5 = madd_epi16(sum5, ones); \
    sum6 = madd_epi16(sum6, ones); \
    sum7 = madd_epi16(sum7, ones); \
    for (k = 1; k < simd_width; ++k) { \
      /*Register a = *(A_row + k);*/ \
      /* Multiply 8-bit, horizontally add to packed 16-bit integers.*/ \
      Register mult0 = maddubs_epi16(a, *(B0_col + k * 8)); \
      Register mult1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
      Register mult2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
      Register mult3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
      Register mult4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
      Register mult5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
      Register mult6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
      Register mult7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
      /* Upcast to 32-bit and horizontally add.*/ \
      mult0 = madd_epi16(mult0, ones); \
      mult1 = madd_epi16(mult1, ones); \
      mult2 = madd_epi16(mult2, ones); \
      mult3 = madd_epi16(mult3, ones); \
      mult4 = madd_epi16(mult4, ones); \
      mult5 = madd_epi16(mult5, ones); \
      mult6 = madd_epi16(mult6, ones); \
      mult7 = madd_epi16(mult7, ones); \
      /*Add in 32bit*/ \
      sum0 = add_epi32(sum0, mult0); \
      sum1 = add_epi32(sum1, mult1); \
      sum2 = add_epi32(sum2, mult2); \
      sum3 = add_epi32(sum3, mult3); \
      sum4 = add_epi32(sum4, mult4); \
      sum5 = add_epi32(sum5, mult5); \
      sum6 = add_epi32(sum6, mult6); \
      sum7 = add_epi32(sum7, mult7); \
      \
    } \
    /* Reduce sums within 128-bit lanes.*/ \
    Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
    Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
    /*The specific implementation may need to reduce further.*/ \
    auto total = PermuteSummer(pack0123, pack4567); \
    RunCallback(callback_impl, total, 0, B0_colidx, 1, B_cols); \
  } \
} \

//An int8 version of the above code, using the add 127 technique
// Expands to Multiply8Shift: like the 16-bit Multiply but A holds uint8_t
// values (shifted into the unsigned range by the caller), so the plain
// unsigned*signed maddubs can be used without sign fixups; products are
// widened to 32-bit via madd with ones before accumulation.
#define INTGEMM_MULTIPLY8SHIFT(Register, target, cpu_type) \
template <class Callback> target static void Multiply8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int8_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const Index simd_width = width / (sizeof(Register) / sizeof(int8_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  INTGEMM_OMP_FOR \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /* Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width); \
      /* These will be packed 16-bit integers containing sums for each row of B multiplied by the row of A. \
         Iterate over shared (inner) dimension.*/ \
      Index k = 0; \
      Register a = *(A_row + k); \
      Register sum0 = maddubs_epi16(a, *(B0_col + k * 8)); \
      Register sum1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
      Register sum2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
      Register sum3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
      Register sum4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
      Register sum5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
      Register sum6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
      Register sum7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
      /* Upcast to 32-bit and horizontally add. Seems a bit faster if this is declared here.*/ \
      Register ones = set1_epi16<Register>(1); \
      sum0 = madd_epi16(sum0, ones); \
      sum1 = madd_epi16(sum1, ones); \
      sum2 = madd_epi16(sum2, ones); \
      sum3 = madd_epi16(sum3, ones); \
      sum4 = madd_epi16(sum4, ones); \
      sum5 = madd_epi16(sum5, ones); \
      sum6 = madd_epi16(sum6, ones); \
      sum7 = madd_epi16(sum7, ones); \
      for (k = 1; k < simd_width; ++k) { \
        a = *(A_row + k); \
        /* Multiply 8-bit, horizontally add to packed 16-bit integers.*/ \
        Register mult0 = maddubs_epi16(a, *(B0_col + k * 8)); \
        Register mult1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
        Register mult2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
        Register mult3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
        Register mult4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
        Register mult5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
        Register mult6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
        Register mult7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
        /* Upcast to 32-bit and horizontally add.*/ \
        mult0 = madd_epi16(mult0, ones); \
        mult1 = madd_epi16(mult1, ones); \
        mult2 = madd_epi16(mult2, ones); \
        mult3 = madd_epi16(mult3, ones); \
        mult4 = madd_epi16(mult4, ones); \
        mult5 = madd_epi16(mult5, ones); \
        mult6 = madd_epi16(mult6, ones); \
        mult7 = madd_epi16(mult7, ones); \
        /*Add in 32bit*/ \
        sum0 = add_epi32(sum0, mult0); \
        sum1 = add_epi32(sum1, mult1); \
        sum2 = add_epi32(sum2, mult2); \
        sum3 = add_epi32(sum3, mult3); \
        sum4 = add_epi32(sum4, mult4); \
        sum5 = add_epi32(sum5, mult5); \
        sum6 = add_epi32(sum6, mult6); \
        sum7 = add_epi32(sum7, mult7); \
        \
      } \
      /* Reduce sums within 128-bit lanes.*/ \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      /*The specific implementation may need to reduce further.*/ \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
} \

/* 8-bit matrix multiply used by AVX and AVX2.
* These have two peculiar properties:
* 1. The sign instructions don't exist in AVX512.
* 2. 16 registers means gcc's register allocation failed so I wrote it in my
* own asm.
* 3. They support 3-argument vpsignb and vpmaddubsw.
*
* Fun fact: AVX introduced the three-argument vpsignb and vpmaddubsw but only
* for 128-bit, despite the primary change in AVX being the addition of
* 256-bit. We had to wait for INTGEMM_AVX2 to get 256-bit versions of vpsignb and
* vpmaddubsw. That's why this code is generic over 128-bit or 256-bit.
*/
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX2
// One step of the 8-bit AVX2 inner loop: multiply the A register `a` against
// eight consecutive B registers b[0..7] and accumulate into the saturated
// 16-bit sums sum0..sum7. gcc gets a hand-scheduled inline-asm version (see
// the bug link below); clang and others get equivalent intrinsics.
INTGEMM_AVX2 inline static void InnerINTGEMM_AVX2(
    __m256i a, const __m256i *b,
    __m256i &sum0, __m256i &sum1, __m256i &sum2, __m256i &sum3,
    __m256i &sum4, __m256i &sum5, __m256i &sum6, __m256i &sum7) {
  // Annoyingly the only 8-bit multiply is signed * unsigned (maddubs).
  // So we take the sign bits off of a and apply them each b in a * b.
  //
  // We have only 16 YMM registers but we want to store:
  // 1 for a (or |a|)
  // 8 temporaries for applying sign to each column of B.
  // 8 sums.
#if defined(__GNUC__) && !defined(__clang__)
  // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94663
  // gcc's register allocator does:
  // 1 for a, do all the sign application, then overwrite with |a|
  // 8 temporaries
  // 7 sums in registers + 1 on the stack
  //
  // But it's possible to complete an operation early, freeing up its
  // temporary register for reuse. But completing an operation early
  // requires us to have |a| for vpmaddubsw while completing the later
  // operation needs a again to apply sign.
  //
  // So we do two columns, 0 and 1, early. This allows b0_b6 and b1_b7
  // to be reused by columns 6 and 7, respectively. And there's enough
  // registers to store both a and |a|.
  //
  // These are the temporary variables used to process each column of b.
  // We let the compiler choose which register number is which, but force
  // it to allocate all registers.
  __m256i absa;
  __m256i b0_b6, b1_b7, b2, b3, b4, b5;
  // Maybe this will tell gcc that we're accessing 8 registers starting
  // at B_live. Though I doubt it because we're passing the address as a
  // register.
  typedef struct { __m256i x[8]; } B_range;
  asm(
      // Copy the first 6 columns of b to registers. We assume B has
      // been rearranged so that these 8 columns are consecutive.
      // vpsignb does not take a memory address as its second argument,
      // so this can't be inlined into vsignb.
      "vmovdqa (%[B]), %[b0_b6]\n"
      "vmovdqa %c[size](%[B]), %[b1_b7]\n"
      // These multiplies are executed by the assembler, not by the CPU
      // at run time.
      // I would have liked to just initialize b2 etc above but that
      // would make it an input argument "+x" instead of "=&x". And +x
      // counts as two operands for purposes of gcc's annoying 30-operand
      // limit.
      "vmovdqa 2*%c[size](%[B]), %[b2]\n"
      "vmovdqa 3*%c[size](%[B]), %[b3]\n"
      "vmovdqa 4*%c[size](%[B]), %[b4]\n"
      "vmovdqa 5*%c[size](%[B]), %[b5]\n"
      // Store the absolute value of a in absa.
      "vpabsb %[a], %[absa]\n"
      // If a byte of a is negative, negate the corresponding byte in
      // b0_b6 etc.
      "vpsignb %[a], %[b0_b6], %[b0_b6]\n"
      "vpsignb %[a], %[b1_b7], %[b1_b7]\n"
      // Multiply signed * unsigned then horizontally add to form packed
      // 16-bit integers:
      // b0[0] * |a|[0] + b0[1] * |a|[1], b0[2] * |a|[2] + b0[3] * |a|[3], ...
      "vpmaddubsw %[b0_b6], %[absa], %[b0_b6]\n"
      "vpmaddubsw %[b1_b7], %[absa], %[b1_b7]\n"
      // vpmaddubsw has latency 5 so work on some other sign bits while
      // we're at it.
      "vpsignb %[a], %[b2], %[b2]\n"
      "vpsignb %[a], %[b3], %[b3]\n"
      "vpsignb %[a], %[b4], %[b4]\n"
      "vpsignb %[a], %[b5], %[b5]\n"
      // Perform a 16-bit add with saturation to accumlate sums.
      "vpaddsw %[b0_b6], %[sum0], %[sum0]\n"
      // Now we can reuse b0_b6 for b6
      "vmovdqa 6*%c[size](%[B]), %[b0_b6]\n"
      "vpaddsw %[b1_b7], %[sum1], %[sum1]\n"
      // Now we can reuse b1_b7 for b7
      "vmovdqa 7*%c[size](%[B]), %[b1_b7]\n"
      // More crunching while the load happens.
      "vpmaddubsw %[b2], %[absa], %[b2]\n"
      "vpmaddubsw %[b3], %[absa], %[b3]\n"
      "vpmaddubsw %[b4], %[absa], %[b4]\n"
      "vpsignb %[a], %[b0_b6], %[b0_b6]\n"
      "vpsignb %[a], %[b1_b7], %[b1_b7]\n"
      "vpmaddubsw %[b5], %[absa], %[b5]\n"
      "vpmaddubsw %[b0_b6], %[absa], %[b0_b6]\n"
      "vpmaddubsw %[b1_b7], %[absa], %[b1_b7]\n"
      "vpaddsw %[b2], %[sum2], %[sum2]\n"
      "vpaddsw %[b3], %[sum3], %[sum3]\n"
      "vpaddsw %[b4], %[sum4], %[sum4]\n"
      "vpaddsw %[b5], %[sum5], %[sum5]\n"
      "vpaddsw %[b0_b6], %[sum6], %[sum6]\n"
      "vpaddsw %[b1_b7], %[sum7], %[sum7]\n"
      : [sum0] "+x" (sum0),
        [sum1] "+x" (sum1),
        [sum2] "+x" (sum2),
        [sum3] "+x" (sum3),
        [sum4] "+x" (sum4),
        [sum5] "+x" (sum5),
        [sum6] "+x" (sum6),
        [sum7] "+x" (sum7),
        [b0_b6] "=&x" (b0_b6),
        [b1_b7] "=&x" (b1_b7),
        [b2] "=&x" (b2),
        [b3] "=&x" (b3),
        [b4] "=&x" (b4),
        [b5] "=&x" (b5),
        [absa] "=&x" (absa)
      :
        // I would like to use m here but that non-deterministically
        // chooses %(eax) or -256$(eax) and there's no way to add to that
        // memory address:
        // https://gcc.gnu.org/ml/gcc-help/2011-04/msg00518.html
        //
        [B] "r" (reinterpret_cast<const B_range*>(b)),
        [a] "x" (a),
        [size] "i" (sizeof(__m256i))
      );
#else
  // https://bugs.llvm.org/show_bug.cgi?id=41482
  // clang has a bug: target attribute avx2 doesn't allow inline assembly with
  // +x for YMM registers. For example, this will not compile with default
  // arguments:
  // __attribute__ ((target ("avx2"))) void Foo(__m256i sum0) {
  //   asm("" : [sum0] "+x" (sum0));
  // }
  // but it will compile with -mavx2.
  // However, clang does allow intrinsics and has a better register allocator
  // than gcc. So here we just use intrinsics.
  __m256i a_positive = abs_epi8(a);
  sum0 = adds_epi16(sum0, maddubs_epi16(a_positive, sign_epi8(b[0], a)));
  sum1 = adds_epi16(sum1, maddubs_epi16(a_positive, sign_epi8(b[1], a)));
  sum2 = adds_epi16(sum2, maddubs_epi16(a_positive, sign_epi8(b[2], a)));
  sum3 = adds_epi16(sum3, maddubs_epi16(a_positive, sign_epi8(b[3], a)));
  sum4 = adds_epi16(sum4, maddubs_epi16(a_positive, sign_epi8(b[4], a)));
  sum5 = adds_epi16(sum5, maddubs_epi16(a_positive, sign_epi8(b[5], a)));
  sum6 = adds_epi16(sum6, maddubs_epi16(a_positive, sign_epi8(b[6], a)));
  sum7 = adds_epi16(sum7, maddubs_epi16(a_positive, sign_epi8(b[7], a)));
#endif
}
#endif
// For INTGEMM_SSSE3 without AVX
// One inner step of the int8 dot product: for each of the 8 B registers,
// accumulate maddubs_epi16(|a|, sign(b[i], a)) into sum_i with 16-bit
// saturation.  maddubs_epi16 requires its first operand to be unsigned, so
// the sign of a is transferred onto b via vpsignb and |a| is used instead
// (same trick as the AVX2 path above).
INTGEMM_SSSE3 inline static void InnerINTGEMM_SSSE3(
    __m128i a, const __m128i *b,
    __m128i &sum0, __m128i &sum1, __m128i &sum2, __m128i &sum3,
    __m128i &sum4, __m128i &sum5, __m128i &sum6, __m128i &sum7) {
  // |a|: unsigned first operand for maddubs; signs moved onto b below.
  __m128i a_positive = abs_epi8(a);
  // Saturating 16-bit accumulation of the horizontal pair-products.
  sum0 = adds_epi16(sum0, maddubs_epi16(a_positive, sign_epi8(b[0], a)));
  sum1 = adds_epi16(sum1, maddubs_epi16(a_positive, sign_epi8(b[1], a)));
  sum2 = adds_epi16(sum2, maddubs_epi16(a_positive, sign_epi8(b[2], a)));
  sum3 = adds_epi16(sum3, maddubs_epi16(a_positive, sign_epi8(b[3], a)));
  sum4 = adds_epi16(sum4, maddubs_epi16(a_positive, sign_epi8(b[4], a)));
  sum5 = adds_epi16(sum5, maddubs_epi16(a_positive, sign_epi8(b[5], a)));
  sum6 = adds_epi16(sum6, maddubs_epi16(a_positive, sign_epi8(b[6], a)));
  sum7 = adds_epi16(sum7, maddubs_epi16(a_positive, sign_epi8(b[7], a)));
}
//INTGEMM_AVX2 or INTGEMM_SSSE3 multiply
/* Generates a Multiply kernel: C = A * B for int8 inputs, processing one row
 * of A against a panel of 8 B columns at a time.  Packed 16-bit saturating
 * sums are widened to 32-bit (multiply by 1 + horizontal add) before the
 * callback consumes them.  NOTE(review): the indexing (B0_col stride of
 * simd_width per column, B_live += 8) assumes B was prepared as interleaved
 * panels of 8 columns -- confirm against the corresponding PrepareB. */
#define INTGEMM_MULTIPLY8(Register, target, cpu_type) \
template <typename Callback> target static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % sizeof(Register) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const Index simd_width = width / sizeof(Register); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  INTGEMM_OMP_FOR \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /*Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      /*Iterate over shared (inner) dimension.*/ \
      const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width); \
      const Register *A_end = A_live + simd_width; \
      const Register *B_live = B0_col; \
      /* Rather than initializing as zeros and adding, just initialize the first.*/ \
      Register a = *(A_live++); \
      Register a_positive = abs_epi8(a); \
      /* These will be packed 16-bit integers containing sums for each column of B multiplied by the row of A.*/ \
      Register sum0 = maddubs_epi16(a_positive, sign_epi8(B_live[0], a)); \
      Register sum1 = maddubs_epi16(a_positive, sign_epi8(B_live[1], a)); \
      Register sum2 = maddubs_epi16(a_positive, sign_epi8(B_live[2], a)); \
      Register sum3 = maddubs_epi16(a_positive, sign_epi8(B_live[3], a)); \
      Register sum4 = maddubs_epi16(a_positive, sign_epi8(B_live[4], a)); \
      Register sum5 = maddubs_epi16(a_positive, sign_epi8(B_live[5], a)); \
      Register sum6 = maddubs_epi16(a_positive, sign_epi8(B_live[6], a)); \
      Register sum7 = maddubs_epi16(a_positive, sign_epi8(B_live[7], a)); \
      B_live += 8; \
      /* Use A as the loop variable so the add can be done where gcc likes it for branch prediction.*/ \
      for (; A_live != A_end; ++A_live, B_live += 8) { \
        Inner##target(*A_live, B_live, sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); \
      } \
      /* Convert 16-bit to 32-bit and add, not caring what parts are added.
       * Implementations:
       * 1. https://github.com/tesseract-ocr/tesseract/blob/master/src/arch/intsimdmatrixavx2.cpp#L67 under Apache license:
       *    This does a multiply by 1 and horizontal add:
       *    _mm512_madd_epi16(sum, _mm512_set1_epi16(1))
       *    Current fastest.
       *
       * 2. Signed extension and fold halves:
       *    sum = _mm512_add_epi32(
       *      _mm512_cvtepi16_epi32(_mm512_castsi512_si256(sum)),
       *      _mm512_cvtepi16_epi32(_mm512_extracti64x4_epi64(sum, 1)));
       *
       * 3. Sign extend by abuse of bitshift, then add.
       *    sum = _mm512_add_epi32(
       *      _mm512_srai_epi32(_mm512_slli_epi32(sum, 16), 16),
       *      _mm512_srai_epi32(sum, 16));
       */ \
      Register ones = set1_epi16<Register>(1); \
      sum0 = madd_epi16(sum0, ones); \
      sum1 = madd_epi16(sum1, ones); \
      sum2 = madd_epi16(sum2, ones); \
      sum3 = madd_epi16(sum3, ones); \
      sum4 = madd_epi16(sum4, ones); \
      sum5 = madd_epi16(sum5, ones); \
      sum6 = madd_epi16(sum6, ones); \
      sum7 = madd_epi16(sum7, ones); \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
}
/* Wrap a multiply call in OMP parallelism. Here it launches threads then
* inside the implementation there is a pragma omp for. In gcc >= 8 these
* could have been the same but older compilers don't imbue target attributes
* on the hidden function created by pragma omp parallel.
*
* Also, gcc 7 is unable to deduce the function pointer type (for ChooseCPU) if
* I use typename Backend::Integer directly in the arguments. As a workaround,
* have a default template argument Integer then use that so it's resolved.
*/
template <class Callback, class Backend, class Integer = typename Backend::Integer> static inline void OMPParallelWrap(const Integer *A, const Integer *B, Index A_rows, Index width, Index B_cols, Callback callback) {
  // Spawn the OMP thread team here; the worksharing `omp for` loop lives
  // inside the backend's Multiply implementation.
#pragma omp parallel
  {
    Backend::template Multiply<Callback>(A, B, A_rows, width, B_cols, callback);
  }
}
template <class Callback, class Backend> static inline void OMPParallelWrap8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
  // Spawn the OMP thread team here; the worksharing `omp for` loop lives
  // inside the backend's Multiply8Shift implementation.
#pragma omp parallel
  {
    Backend::template Multiply8Shift<Callback>(A, B, A_rows, width, B_cols, callback);
  }
}
} // namespace intgemm
|
mongodb_scram_fmt_plug.c | /*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mongodb_scram;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mongodb_scram);
#else
#include <openssl/sha.h>
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha.h"
#include "base64_convert.h"
#include "hmac_sha.h"
#include "simd-intrinsics.h"
//#undef SIMD_COEF_32
#include "pbkdf2_hmac_sha1.h"
#include "md5.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#if defined SIMD_COEF_32
#define SIMD_KEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#define FORMAT_LABEL "scram"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SCRAM PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 125
#define HASH_LENGTH 28
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(uint32_t)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#if !defined(SIMD_COEF_32)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#else
#define MIN_KEYS_PER_CRYPT SIMD_KEYS
#define MAX_KEYS_PER_CRYPT SIMD_KEYS
#endif
#define FORMAT_TAG "$scram$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define MAX_USERNAME_LENGTH 128
static struct fmt_tests tests[] = {
{"$scram$someadmin$10000$wf42AF7JaU1NSeBaSmkKzw==$H6A5RF0qz6DrcWNNX4xe+wIeVEw=", "secret"},
{"$scram$admin$10000$ouQdw5om9Uc5gxulO9F/8w==$DSnATYsgoE8InL5Petfjp8MWGh4=", "test@12345"},
{NULL}
};
static struct custom_salt {
int saltlen;
int iterations;
char username[MAX_USERNAME_LENGTH + 1];
unsigned char salt[18 + 1]; /* base64 decoding, 24 / 4 * 3 = 18 */
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* One-time format setup: scale the key range by the OpenMP thread count and
 * allocate the plaintext and result buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	static int omp_t = 1;
	omp_t = omp_get_max_threads();
	/* min gets one key per thread; max additionally gets OMP_SCALE
	 * keys per thread to amortize scheduling overhead. */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate one candidate line: "$scram$user$iterations$salt_b64$hash_b64".
 * Returns 1 if well-formed, 0 otherwise.  Fixes: removed stray ';;' and
 * added a NULL check on strdup(). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	if (!ctcopy)
		return 0;
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* username */
		goto err;
	if (strlen(p) >= MAX_USERNAME_LENGTH)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	/* salt is base64 with two '=' padding chars, at most 24 chars */
	if (strlen(p)-2 != base64_valid_length(p, e_b64_mime, flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p) > 24)
		goto err;
	if ((p = strtokm(NULL, "")) == NULL)	/* hash */
		goto err;
	/* hash is base64 with one '=' padding char, at most HASH_LENGTH chars */
	if (strlen(p)-1 != base64_valid_length(p, e_b64_mime, flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p) > HASH_LENGTH)
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a (pre-validated) ciphertext into the static custom_salt record:
 * username, iteration count and the base64-decoded salt.  Fixes: removed
 * stray ';;', replaced the magic '128' copy bound with one derived from the
 * destination buffer, and added a NULL check on strdup(). */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy, *keeptr, *p;

	memset(&cs, 0, sizeof(cs));
	ctcopy = strdup(ciphertext);
	if (!ctcopy)
		return (void *)&cs;
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	/* buffer is memset above, so the copy is always NUL-terminated */
	strncpy(cs.username, p, sizeof(cs.username) - 1);
	p = strtokm(NULL, "$");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "$");
	base64_convert(p, e_b64_mime, strlen(p), (char*)cs.salt, e_b64_raw, sizeof(cs.salt), flg_Base64_NO_FLAGS, 0);
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the trailing base64 hash field of the ciphertext into a static,
 * word-aligned BINARY_SIZE buffer (the StoredKey to compare against). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;	/* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;

	/* the hash is everything after the last '$' */
	p = strrchr(ciphertext, '$') + 1;
	base64_convert(p, e_b64_mime, strlen(p), (char*)out, e_b64_raw, sizeof(buf.c), flg_Base64_DONOT_NULL_TERMINATE, 0);
	return out;
}
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Expand str[0..len) into 2*len hex digits (via the itoa16 digit table)
 * written to out; the output is not NUL-terminated. */
inline static void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;

	for (i = 0; i < len; ++i) {
		unsigned char byte = str[i];

		out[2 * i]     = itoa16[byte >> 4];
		out[2 * i + 1] = itoa16[byte & 0xF];
	}
}
/* Compute MongoDB SCRAM StoredKeys for all queued candidate passwords:
 *   digest    = hex(MD5(username ":mongo:" password))
 *   SaltedPwd = PBKDF2-HMAC-SHA1(digest, salt, iterations)
 *   ClientKey = HMAC-SHA1(SaltedPwd, "Client Key")
 *   StoredKey = SHA1(ClientKey)        (per RFC 5802)
 * Results land in crypt_out[].  Fix: removed a leftover empty
 * "#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 / #endif" pair that sat
 * between the omp pragma and its loop. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#if !defined (SIMD_COEF_32)
		SHA_CTX ctx;
		MD5_CTX mctx;
		unsigned char hexhash[32];
		unsigned char hash[16];
		unsigned char out[BINARY_SIZE];

		/* MD5(username ":mongo:" password), then hex-encode */
		MD5_Init(&mctx);
		MD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));
		MD5_Update(&mctx, ":mongo:", 7);
		MD5_Update(&mctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(hash, &mctx);
		hex_encode(hash, 16, hexhash);
		/* SaltedPassword = PBKDF2-HMAC-SHA1(hex digest, 16-byte salt) */
		pbkdf2_sha1(hexhash, 32, cur_salt->salt, 16,
		            cur_salt->iterations, out, BINARY_SIZE, 0);
		/* StoredKey = SHA1(HMAC-SHA1(SaltedPassword, "Client Key")) */
		hmac_sha1(out, BINARY_SIZE, (unsigned char*)"Client Key", 10, out, BINARY_SIZE);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, out, BINARY_SIZE);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
#else
		/* Same pipeline, but PBKDF2 runs SIMD_KEYS lanes at once. */
		SHA_CTX ctx;
		MD5_CTX mctx;
		int i;
		unsigned char hexhash_[SIMD_KEYS][32], *hexhash[SIMD_KEYS];
		unsigned char hash[16];
		int lens[SIMD_KEYS];
		unsigned char out_[SIMD_KEYS][BINARY_SIZE], *out[SIMD_KEYS];

		for (i = 0; i < SIMD_KEYS; ++i) {
			MD5_Init(&mctx);
			MD5_Update(&mctx, cur_salt->username, strlen((char*)cur_salt->username));
			MD5_Update(&mctx, ":mongo:", 7);
			MD5_Update(&mctx, saved_key[index+i], strlen(saved_key[index+i]));
			MD5_Final(hash, &mctx);
			hexhash[i] = hexhash_[i];
			hex_encode(hash, 16, hexhash[i]);
			lens[i] = 32;
			out[i] = out_[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)hexhash, lens, cur_salt->salt, 16,
		                cur_salt->iterations, out, BINARY_SIZE, 0);
		for (i = 0; i < SIMD_KEYS; ++i) {
			hmac_sha1(out[i], BINARY_SIZE, (unsigned char*)"Client Key", 10, out[i], BINARY_SIZE);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, out[i], BINARY_SIZE);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
#endif
	}
	return count;
}
/* Quick scan: does any computed hash match the candidate binary?
 * Only the first ARCH_SIZE bytes are compared here; cmp_one() below does
 * the full BINARY_SIZE comparison. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full comparison of one computed hash against the candidate binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* cmp_one already compares all BINARY_SIZE bytes, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate plaintext (truncated to PLAINTEXT_LENGTH). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

/* Return the stored candidate plaintext. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with John the Ripper: first the parameter
 * struct, then the method table wired to the functions above. */
struct fmt_main fmt_mongodb_scram = {
	{ /* parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
/* get_hash_N functions shared via the common header */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__lxor_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint8)
// A*D function (colscale): GB (_AxD__lxor_uint8)
// D*A function (rowscale): GB (_DxB__lxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint8)
// C=scalar+B GB (_bind1st__lxor_uint8)
// C=scalar+B' GB (_bind1st_tran__lxor_uint8)
// C=A+scalar GB (_bind2nd__lxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B has been pre-sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads.
GrB_Info GB (_Cdense_accumB__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed as an untyped pointer).
// Fix: removed the duplicated "return (GrB_SUCCESS)" inside the braced
// block, which made the one after it unreachable; this now matches the
// shape of _Cdense_accumB above.  Behavior is unchanged.
GrB_Info GB (_Cdense_accumb__lxor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling each column of A by the corresponding diagonal entry of D.
// A has been pre-sliced (A_ek_slicing) into A_ntasks tasks for A_nthreads.
GrB_Info GB (_AxD__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// C = D*B, scaling each row of B by the corresponding diagonal entry of D.
GrB_Info GB (_DxB__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise add over the union of patterns of A and B, with optional mask
// M (structural if Mask_struct, complemented if Mask_comp).  The C_to_* maps
// and TaskList describe the parallel slicing computed by the caller.
GrB_Info GB (_AaddB__lxor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise multiply over the intersection of patterns of A and B,
// general case (method 01), with optional mask M.
GrB_Info GB (_AemultB_01__lxor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A is sparse/hypersparse, B is bitmap/full.  flipxy
// selects fmult(y,x) for non-commutative ops without a flipped variant;
// for LXOR (commutative, GB_BINOP_FLIP == 0) the unflipped path compiles in.
GrB_Info GB (_AemultB_02__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: M is sparse/hypersparse, while A and B are both
// bitmap/full; M is pre-sliced into M_ntasks tasks for M_nthreads.
GrB_Info GB (_AemultB_03__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is constructed as a bitmap matrix, with any of
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B (ewise_method selects the variant).
GrB_Info GB (_AemultB_bitmap__lxor_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = lxor (x, Bx [p]) for all p, binding the scalar x as the first
// operand.  Entries absent from the bitmap Bb are skipped via GBB.
GrB_Info GB (_bind1st__lxor_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = lxor (Ax [p], y) for all p, binding the scalar y as the second
// operand.  Entries absent from the bitmap Ab are skipped via GBB.
GrB_Info GB (_bind2nd__lxor_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

// C = op (x, A'): transpose A while applying lxor with the bound scalar x.
GrB_Info GB (_bind1st_tran__lxor_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

// C = op (A', y): transpose A while applying lxor with the bound scalar y.
GrB_Info GB (_bind2nd_tran__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
atom_symmetry_class.h | // Copyright (c) 2013-2016 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file atom_symmetry_class.h
*
* \brief Contains declaration and partial implementation of sirius::Atom_symmetry_class class.
*/
#ifndef __ATOM_SYMMETRY_CLASS_H__
#define __ATOM_SYMMETRY_CLASS_H__
#include "sirius_io.h"
#include "atom_type.h"
#include "communicator.hpp"
#include "eigenproblem.h"
namespace sirius {
/// Data and methods specific to the symmetry class of the atom.
/** Atoms transforming into each other under symmetry operations belong to the same symmetry class. They have the
* same spherical part of the on-site potential and, as a consequence, the same radial functions.
*/
class Atom_symmetry_class
{
private:
/// Symmetry class id in the range [0, N_class).
int id_;
/// List of atoms of this class.
std::vector<int> atom_id_;
/// Pointer to atom type.
Atom_type const& atom_type_;
/// Spherical part of the effective potential.
std::vector<double> spherical_potential_;
/// List of radial functions for the LAPW basis.
/** This array stores all the radial functions (AW and LO) and their derivatives. Radial derivatives of functions
* are multiplied by \f$ x \f$.\n
* 1-st dimension: index of radial point \n
* 2-nd dimension: index of radial function \n
 *  3-rd dimension: 0 - function itself, 1 - radial derivative */
mdarray<double, 3> radial_functions_;
/// Surface derivatives of AW radial functions.
mdarray<double, 3> aw_surface_derivatives_;
/// Spherical part of radial integral.
mdarray<double, 2> h_spherical_integrals_;
/// Overlap integrals.
mdarray<double, 3> o_radial_integrals_;
/// Overlap integrals for IORA relativistic treatment.
mdarray<double, 2> o1_radial_integrals_;
/// Spin-orbit interaction integrals.
mdarray<double, 3> so_radial_integrals_;
/// Core charge density.
std::vector<double> core_charge_density_;
/// Core eigen-value sum.
double core_eval_sum_{0};
/// Core leakage.
double core_leakage_{0};
/// list of radial descriptor sets used to construct augmented waves
mutable std::vector<radial_solution_descriptor_set> aw_descriptors_;
/// list of radial descriptor sets used to construct local orbitals
mutable std::vector<local_orbital_descriptor> lo_descriptors_;
/// Generate radial functions for augmented waves
inline void generate_aw_radial_functions(relativity_t rel__);
    /// Generate local orbital radial functions
inline void generate_lo_radial_functions(relativity_t rel__);
public:
    /// Constructor
    /** Allocates and zeroes the storage for radial functions and radial
     *  integrals sized from the atom type, and copies the AW / LO radial
     *  descriptor sets so this class can hold its own linearization energies. */
    Atom_symmetry_class(int id_, Atom_type const& atom_type_)
        : id_(id_)
        , atom_type_(atom_type_)
    {
        if (!atom_type_.initialized()) {
            TERMINATE("atom type is not initialized");
        }

        /* up to 3 surface derivative orders per (order, l) pair */
        aw_surface_derivatives_ = mdarray<double, 3>(atom_type_.max_aw_order(), atom_type_.num_aw_descriptors(), 3);

        /* last dimension: 0 - function, 1 - radial derivative times x */
        radial_functions_ = mdarray<double, 3>(atom_type_.num_mt_points(), atom_type_.mt_radial_basis_size(), 2);

        h_spherical_integrals_ = mdarray<double, 2>(atom_type_.mt_radial_basis_size(), atom_type_.mt_radial_basis_size());
        h_spherical_integrals_.zero();

        o_radial_integrals_ = mdarray<double, 3>(atom_type_.indexr().lmax() + 1, atom_type_.indexr().max_num_rf(),
                                                 atom_type_.indexr().max_num_rf());
        o_radial_integrals_.zero();

        so_radial_integrals_ = mdarray<double, 3>(atom_type_.indexr().lmax() + 1, atom_type_.indexr().max_num_rf(),
                                                  atom_type_.indexr().max_num_rf());
        so_radial_integrals_.zero();

        /* extra overlap integrals only needed for the IORA treatment */
        if (atom_type_.parameters().valence_relativity() == relativity_t::iora) {
            o1_radial_integrals_ = mdarray<double, 2>(atom_type_.mt_radial_basis_size(), atom_type_.mt_radial_basis_size());
            o1_radial_integrals_.zero();
        }

        /* copy descriptors because enu is different between atom classes */
        aw_descriptors_.resize(atom_type_.num_aw_descriptors());
        for (int i = 0; i < num_aw_descriptors(); i++) {
            aw_descriptors_[i] = atom_type_.aw_descriptor(i);
        }

        lo_descriptors_.resize(atom_type_.num_lo_descriptors());
        for (int i = 0; i < num_lo_descriptors(); i++) {
            lo_descriptors_[i] = atom_type_.lo_descriptor(i);
        }

        core_charge_density_.resize(atom_type_.num_mt_points());
        std::memset(&core_charge_density_[0], 0, atom_type_.num_mt_points() * sizeof(double));
    }
/// Set the spherical component of the potential
/** Atoms belonging to the same symmetry class have the same spherical potential. */
inline void set_spherical_potential(std::vector<double> const& vs__);
/// Find linearization energies and generate AW and lo radial functions.
inline void generate_radial_functions(relativity_t rel__);
/// Broadcast radial functions from a source rank to the communicator.
inline void sync_radial_functions(Communicator const& comm__, int const rank__);
/// Broadcast radial integrals from a source rank to the communicator.
inline void sync_radial_integrals(Communicator const& comm__, int const rank__);
/// Broadcast the core charge density from a source rank to the communicator.
inline void sync_core_charge_density(Communicator const& comm__, int const rank__);
/// Check if local orbitals are linearly independent
inline std::vector<int> check_lo_linear_independence(double etol__);
/// Dump local orbitals to the file for debug purposes
inline void dump_lo();
/// Find core states and generate core density.
inline void generate_core_charge_density(relativity_t core_rel__);
/// Find linearization energies for all descriptors with auto_enu enabled.
inline void find_enu(relativity_t rel__);
/// Print the (automatically found) linearization energies.
inline void write_enu(runtime::pstdout& pout) const;
/// Generate radial overlap and SO integrals
/** In the case of spin-orbit interaction the following integrals are computed:
 *  \f[
 *      \int f_{p}(r) \Big( \frac{1}{(2 M c)^2} \frac{1}{r} \frac{d V}{d r} \Big) f_{p'}(r) r^2 dr
 *  \f]
 *
 *  Relativistic mass M is defined as
 *  \f[
 *      M = 1 - \frac{1}{2 c^2} V
 *  \f]
 */
inline void generate_radial_integrals(relativity_t rel__);
/// Compute m-th order radial derivative at the MT surface.
inline double aw_surface_dm(int l, int order, int dm) const
{
assert(dm <= 2);
return aw_surface_derivatives_(order, l, dm);
}
/// Set the dm-th order radial derivative of the AW function at the MT surface.
inline void set_aw_surface_deriv(int l, int order, int dm, double deriv)
{
assert(dm <= 2);
aw_surface_derivatives_(order, l, dm) = deriv;
}
/// Return symmetry class id.
inline int id() const
{
return id_;
}
/// Add atom id to the current class.
inline void add_atom_id(int atom_id__)
{
atom_id_.push_back(atom_id__);
}
/// Return number of atoms belonging to the current symmetry class.
inline int num_atoms() const
{
return static_cast<int>(atom_id_.size());
}
/// Return the id of the idx-th atom of this class.
inline int atom_id(int idx) const
{
return atom_id_[idx];
}
/// Get a value of the radial functions.
inline double radial_function(int ir, int idx) const
{
return radial_functions_(ir, idx, 0);
}
/// Get a reference to the value of the radial function.
inline double& radial_function(int ir, int idx)
{
return radial_functions_(ir, idx, 0);
}
/// Get a value of the radial function derivative.
inline double radial_function_derivative(int ir, int idx) const
{
return radial_functions_(ir, idx, 1);
}
/// Get a reference to the value of the radial function derivative.
inline double& radial_function_derivative(int ir, int idx)
{
return radial_functions_(ir, idx, 1);
}
/// Return the radial integral of the spherical part of the Hamiltonian.
inline double h_spherical_integral(int i1, int i2) const
{
return h_spherical_integrals_(i1, i2);
}
/// Return the overlap integral of two radial functions of a given l-channel.
inline double const& o_radial_integral(int l, int order1, int order2) const
{
return o_radial_integrals_(l, order1, order2);
}
/// Set the overlap integral of two radial functions of a given l-channel.
inline void set_o_radial_integral(int l, int order1, int order2, double oint__)
{
o_radial_integrals_(l, order1, order2) = oint__;
}
/// Return the o1 (IORA) radial integral.
inline double const& o1_radial_integral(int xi1__, int xi2__) const
{
return o1_radial_integrals_(xi1__, xi2__);
}
/// Set the o1 (IORA) radial integral.
inline void set_o1_radial_integral(int idxrf1__, int idxrf2__, double val__)
{
o1_radial_integrals_(idxrf1__, idxrf2__) = val__;
}
/// Return the spin-orbit radial integral.
inline double so_radial_integral(int l, int order1, int order2) const
{
return so_radial_integrals_(l, order1, order2);
}
/// Return the core charge density at the ir-th radial point.
inline double core_charge_density(int ir) const
{
assert(ir >= 0 && ir < (int)core_charge_density_.size());
return core_charge_density_[ir];
}
/// Return the atom type of this symmetry class.
inline Atom_type const& atom_type() const
{
return atom_type_;
}
/// Return the eigenvalue sum of the core states.
inline double core_eval_sum() const
{
return core_eval_sum_;
}
/// Return the core charge that leaked outside of the muffin-tin sphere.
inline double core_leakage() const
{
return core_leakage_;
}
/// Return the number of AW descriptors (indexed by l).
inline int num_aw_descriptors() const
{
return static_cast<int>(aw_descriptors_.size());
}
/// Return the AW descriptor set of an l-channel (mutable: enu is class-specific).
inline radial_solution_descriptor_set& aw_descriptor(int idx__) const
{
return aw_descriptors_[idx__];
}
/// Return the number of local-orbital descriptors.
inline int num_lo_descriptors() const
{
return static_cast<int>(lo_descriptors_.size());
}
/// Return the idx-th local-orbital descriptor (mutable: enu is class-specific).
inline local_orbital_descriptor& lo_descriptor(int idx__) const
{
return lo_descriptors_[idx__];
}
/// Set the linearization energy of an AW radial function.
inline void set_aw_enu(int l, int order, double enu)
{
aw_descriptors_[l][order].enu = enu;
}
/// Get the linearization energy of an AW radial function.
inline double get_aw_enu(int l, int order) const
{
return aw_descriptors_[l][order].enu;
}
/// Set the linearization energy of a local-orbital radial function.
inline void set_lo_enu(int idxlo, int order, double enu)
{
lo_descriptors_[idxlo].rsd_set[order].enu = enu;
}
/// Get the linearization energy of a local-orbital radial function.
inline double get_lo_enu(int idxlo, int order) const
{
return lo_descriptors_[idxlo].rsd_set[order].enu;
}
};
/// Generate augmented-wave radial functions.
/** For each l-channel the radial equation is solved for every order of energy derivative;
 *  solutions are normalized, Gram-Schmidt orthogonalized within the l-channel,
 *  renormalized and finally divided by r. Each (l, order) pair writes to its own idxrf
 *  slice of radial_functions_, so the parallel loop over l is race-free. */
inline void Atom_symmetry_class::generate_aw_radial_functions(relativity_t rel__)
{
int nmtp = atom_type_.num_mt_points();
Radial_solver solver(atom_type_.zn(), spherical_potential_, atom_type_.radial_grid());
#pragma omp parallel default(shared)
{
/* thread-local spline used for all the integrations below */
Spline<double> s(atom_type_.radial_grid());
std::vector<double> p;
std::vector<double> rdudr;
std::array<double, 2> uderiv;
#pragma omp for schedule(dynamic, 1)
for (int l = 0; l < num_aw_descriptors(); l++) {
for (int order = 0; order < (int)aw_descriptor(l).size(); order++) {
auto rsd = aw_descriptor(l)[order];
int idxrf = atom_type_.indexr().index_by_l_order(l, order);
/* solve the radial equation; p is the radial solution, rdudr = r * du/dr */
solver.solve(rel__, rsd.dme, rsd.l, rsd.enu, p, rdudr, uderiv);
/* normalize */
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = std::pow(p[ir], 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(0));
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) = p[ir] * norm;
radial_functions_(ir, idxrf, 1) = rdudr[ir] * norm;
}
/* surface value (p(R)/R — assumes p = r*u, TODO confirm) and first two derivatives */
aw_surface_derivatives_(order, l, 0) = norm * p.back() / atom_type_.mt_radius();
for (int i: {0, 1}) {
aw_surface_derivatives_(order, l, i + 1) = uderiv[i] * norm;
}
/* orthogonalize to previous radial functions */
for (int order1 = 0; order1 < order; order1++) {
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = radial_functions_(ir, idxrf, 0) * radial_functions_(ir, idxrf1, 0);
}
/* <u_{\nu'}|u_{\nu}> */
double ovlp = s.interpolate().integrate(0);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) -= radial_functions_(ir, idxrf1, 0) * ovlp;
radial_functions_(ir, idxrf, 1) -= radial_functions_(ir, idxrf1, 1) * ovlp;
}
/* keep the surface derivatives consistent with the orthogonalized function */
for (int i: {0, 1, 2}) {
aw_surface_derivatives_(order, l, i) -= aw_surface_derivatives_(order1, l, i) * ovlp;
}
}
/* normalize again */
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = std::pow(radial_functions_(ir, idxrf, 0), 2);
}
norm = s.interpolate().integrate(0);
/* a vanishing norm after orthogonalization means the new function was not independent */
if (std::abs(norm) < 1e-10) {
TERMINATE("aw radial functions are linearly dependent");
}
norm = 1.0 / std::sqrt(norm);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= norm;
radial_functions_(ir, idxrf, 1) *= norm;
}
for (int i: {0, 1, 2}) {
aw_surface_derivatives_(order, l, i) *= norm;
}
}
/* divide by r */
for (int order = 0; order < (int)aw_descriptor(l).size(); order++) {
int idxrf = atom_type_.indexr().index_by_l_order(l, order);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= atom_type_.radial_grid().x_inv(ir);
}
}
}
}
}
/// Generate local-orbital radial functions.
/** Each local orbital is a linear combination of up to three radial solutions. The
 *  combination coefficients are found from a linear system built from the surface values
 *  and derivatives of the solutions (NOTE(review): the rhs below suggests the lo and its
 *  lowest surface derivatives are forced to zero at the MT boundary, with the last
 *  constraint normalized to 1 — confirm against the gesv storage convention). */
inline void Atom_symmetry_class::generate_lo_radial_functions(relativity_t rel__)
{
int nmtp = atom_type_.num_mt_points();
Radial_solver solver(atom_type_.zn(), spherical_potential_, atom_type_.radial_grid());
#pragma omp parallel default(shared)
{
/* thread-local spline used for integration */
Spline<double> s(atom_type_.radial_grid());
/* surface values/derivatives of the (at most 3) radial solutions */
double a[3][3];
#pragma omp for schedule(dynamic, 1)
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
/* number of radial solutions */
int num_rs = static_cast<int>(lo_descriptor(idxlo).rsd_set.size());
assert(num_rs <= 3);
std::vector<std::vector<double>> p(num_rs);
std::vector<std::vector<double>> rdudr(num_rs);
std::array<double, 2> uderiv;
for (int order = 0; order < num_rs; order++) {
auto rsd = lo_descriptor(idxlo).rsd_set[order];
solver.solve(rel__, rsd.dme, rsd.l, rsd.enu, p[order], rdudr[order], uderiv);
/* find norm of the radial solution */
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = std::pow(p[order][ir], 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(0));
/* normalize radial solution and divide by r */
for (int ir = 0; ir < nmtp; ir++) {
p[order][ir] *= (norm * atom_type_.radial_grid().x_inv(ir));
/* don't divide rdudr by r */
rdudr[order][ir] *= norm;
}
uderiv[0] *= norm;
uderiv[1] *= norm;
/* matrix of derivatives */
a[order][0] = p[order].back();
a[order][1] = uderiv[0];
a[order][2] = uderiv[1];
}
/* rhs: only the last constraint is non-zero */
double b[] = {0, 0, 0};
b[num_rs - 1] = 1.0;
/* solve the num_rs x num_rs system for the combination coefficients */
int info = linalg<CPU>::gesv(num_rs, 1, &a[0][0], 3, b, 3);
if (info) {
std::stringstream s;
s << "a[i][j] = ";
for (int i = 0; i < num_rs; i++) {
for (int j = 0; j < num_rs; j++) {
s << a[i][j] << " ";
}
}
s << std::endl;
s << "atom: " << atom_type_.label() << std::endl
<< "zn: " << atom_type_.zn() << std::endl
<< "l: " << lo_descriptor(idxlo).l << std::endl;
s << "gesv returned " << info;
TERMINATE(s);
}
/* index of local orbital radial function */
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
/* take linear combination of radial solutions */
for (int order = 0; order < num_rs; order++) {
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) += b[order] * p[order][ir];
radial_functions_(ir, idxrf, 1) += b[order] * rdudr[order][ir];
}
}
/* find norm of constructed local orbital */
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = std::pow(radial_functions_(ir, idxrf, 0), 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(2));
/* normalize */
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= norm;
radial_functions_(ir, idxrf, 1) *= norm;
}
/* sanity check: by construction the local orbital must vanish at the MT boundary */
if (std::abs(radial_functions_(nmtp - 1, idxrf, 0)) > 1e-10) {
std::stringstream s;
s << "local orbital " << idxlo << " is not zero at MT boundary" << std::endl
<< " atom symmetry class id : " << id() << " (" << atom_type().symbol() << ")" << std::endl
<< " value : " << radial_functions_(nmtp - 1, idxrf, 0) << std::endl
<< " number of MT points: " << nmtp << std::endl
<< " MT radius: " << atom_type_.radial_grid().last() << std::endl
<< " b_coeffs: ";
for (int j = 0; j < num_rs; j++) {
s << b[j] << " ";
}
WARNING(s);
}
}
}
/* optionally verify the linear independence of the generated local orbitals */
if (atom_type_.parameters().control().verification_ > 0 && num_lo_descriptors() > 0) {
check_lo_linear_independence(0.0001);
}
}
/// Check if local orbitals are linearly independent.
/** Builds the overlap matrix of local orbitals, diagonalizes it, and reports orbitals
 *  that can be removed without leaving an eigenvalue below the tolerance. Returns a 0/1
 *  inclusion flag per local orbital (NOTE(review): eigenvalues are treated as sorted in
 *  ascending order by the eigensolver — confirm). */
inline std::vector<int> Atom_symmetry_class::check_lo_linear_independence(double tol__)
{
int nmtp = atom_type_.num_mt_points();
Spline<double> s(atom_type_.radial_grid());
/* overlap matrix of local orbitals; only same-l pairs have a non-zero overlap */
mdarray<double, 2> loprod(num_lo_descriptors(), num_lo_descriptors());
loprod.zero();
for (int idxlo1 = 0; idxlo1 < num_lo_descriptors(); idxlo1++) {
int idxrf1 = atom_type_.indexr().index_by_idxlo(idxlo1);
for (int idxlo2 = 0; idxlo2 < num_lo_descriptors(); idxlo2++) {
int idxrf2 = atom_type_.indexr().index_by_idxlo(idxlo2);
if (lo_descriptor(idxlo1).l == lo_descriptor(idxlo2).l) {
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0);
}
loprod(idxlo1, idxlo2) = s.interpolate().integrate(2);
}
}
}
/* keep a copy of the overlap matrix (loprod is handed to the eigensolver below) */
mdarray<double, 2> ovlp(num_lo_descriptors(), num_lo_descriptors());
loprod >> ovlp;
Eigenproblem_lapack stdevp;
std::vector<double> loprod_eval(num_lo_descriptors());
mdarray<double, 2> loprod_evec(num_lo_descriptors(), num_lo_descriptors());
stdevp.solve(num_lo_descriptors(), loprod.at<CPU>(), loprod.ld(), &loprod_eval[0],
loprod_evec.at<CPU>(), loprod_evec.ld());
/* a tiny eigenvalue of the overlap matrix signals (almost) linear dependence */
if (std::abs(loprod_eval[0]) < tol__) {
printf("\n");
printf("local orbitals for atom symmetry class %i are almost linearly dependent\n", id_);
printf("local orbitals overlap matrix:\n");
for (int i = 0; i < num_lo_descriptors(); i++) {
for (int j = 0; j < num_lo_descriptors(); j++) {
printf("%12.6f", ovlp(i, j));
}
printf("\n");
}
printf("overlap matrix eigen-values:\n");
for (int i = 0; i < num_lo_descriptors(); i++) {
printf("%12.6f", loprod_eval[i]);
}
printf("\n");
printf("smallest eigenvalue: %20.16f\n", loprod_eval[0]);
}
std::vector<int> inc(num_lo_descriptors(), 0);
/* try all local orbitals */
for (int i = 0; i < num_lo_descriptors(); i++) {
/* tentatively include orbital i and rediagonalize the overlap of the included subset */
inc[i] = 1;
std::vector<int> ilo;
for (int j = 0; j < num_lo_descriptors(); j++) {
if (inc[j] == 1) {
ilo.push_back(j);
}
}
std::vector<double> eval(ilo.size());
mdarray<double, 2> evec(ilo.size(), ilo.size());
mdarray<double, 2> tmp(ilo.size(), ilo.size());
for (size_t j1 = 0; j1 < ilo.size(); j1++) {
for (size_t j2 = 0; j2 < ilo.size(); j2++) {
tmp(j1, j2) = ovlp(ilo[j1], ilo[j2]);
}
}
stdevp.solve(static_cast<int>(ilo.size()), tmp.at<CPU>(), tmp.ld(), &eval[0], evec.at<CPU>(), evec.ld());
/* drop orbital i again if it makes the subset (almost) linearly dependent */
if (eval[0] < tol__) {
printf("local orbital %i can be removed\n", i);
inc[i] = 0;
}
}
return inc;
}
inline void Atom_symmetry_class::dump_lo()
{
std::stringstream s;
s << "local_orbitals_" << id_ << ".dat";
FILE* fout = fopen(s.str().c_str(), "w");
for (int ir = 0; ir <atom_type_.num_mt_points(); ir++) {
fprintf(fout, "%f ", atom_type_.radial_grid(ir));
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
fprintf(fout, "%f ", radial_functions_(ir, idxrf, 0));
}
fprintf(fout, "\n");
}
fclose(fout);
s.str("");
s << "local_orbitals_deriv_" << id_ << ".dat";
fout = fopen(s.str().c_str(), "w");
for (int ir = 0; ir <atom_type_.num_mt_points(); ir++) {
fprintf(fout, "%f ", atom_type_.radial_grid(ir));
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
fprintf(fout, "%f ", radial_functions_(ir, idxrf, 1));
}
fprintf(fout, "\n");
}
fclose(fout);
}
/// Set the spherical component of the potential.
/** Atoms belonging to the same symmetry class have the same spherical potential.
 *  The input vector must hold one value per muffin-tin radial point. */
inline void Atom_symmetry_class::set_spherical_potential(std::vector<double> const& vs__)
{
    int npt = atom_type_.num_mt_points();
    if (npt != static_cast<int>(vs__.size())) {
        TERMINATE("wrong size of effective potential array");
    }
    spherical_potential_ = vs__;
}
/// Find linearization energies for all radial-solution descriptors with auto_enu set.
/** Collects pointers to every AW and lo descriptor that requests an automatic
 *  linearization energy and updates them in parallel with Enu_finder. */
inline void Atom_symmetry_class::find_enu(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::find_enu");
std::vector<radial_solution_descriptor*> rs_with_auto_enu;
/* find which aw functions need auto enu */
for (int l = 0; l < num_aw_descriptors(); l++) {
for (size_t order = 0; order < aw_descriptor(l).size(); order++) {
auto& rsd = aw_descriptor(l)[order];
if (rsd.auto_enu) {
rs_with_auto_enu.push_back(&rsd);
}
}
}
/* find which lo functions need auto enu */
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
/* number of radial solutions */
size_t num_rs = lo_descriptor(idxlo).rsd_set.size();
for (size_t order = 0; order < num_rs; order++) {
auto& rsd = lo_descriptor(idxlo).rsd_set[order];
if (rsd.auto_enu) {
rs_with_auto_enu.push_back(&rsd);
}
}
}
/* each iteration updates a distinct descriptor, so the parallel loop is race-free */
#pragma omp parallel for
for (size_t i = 0; i < rs_with_auto_enu.size(); i++) {
auto rsd = rs_with_auto_enu[i];
/* search for the linearization energy starting from the current enu */
rsd->enu = Enu_finder(rel__, atom_type_.zn(), rsd->n, rsd->l, atom_type_.radial_grid(), spherical_potential_, rsd->enu).enu();
}
}
/// Generate augmented-wave and local-orbital radial functions.
/** The radial functions are rebuilt from scratch: the array is zeroed, the
 *  linearization energies are (re)found, then the AW and lo functions are generated. */
inline void Atom_symmetry_class::generate_radial_functions(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_radial_functions");
/* lo generation accumulates with +=, so the array must start from zero */
radial_functions_.zero();
find_enu(rel__);
generate_aw_radial_functions(rel__);
generate_lo_radial_functions(rel__);
#ifdef __PRINT_OBJECT_CHECKSUM
DUMP("checksum(spherical_potential): %18.10f", mdarray<double, 1>(spherical_potential_.data(), atom_type_.num_mt_points()).checksum());
DUMP("checksum(radial_functions): %18.10f", radial_functions_.checksum());
#endif
//** if (true)
//** {
//** std::stringstream s;
//** s << "radial_functions_" << id_ << ".dat";
//** FILE* fout = fopen(s.str().c_str(), "w");
//** for (int ir = 0; ir <atom_type_.num_mt_points(); ir++)
//** {
//** fprintf(fout, "%f ", atom_type_.radial_grid(ir));
//** for (int idxrf = 0; idxrf < atom_type_.indexr().size(); idxrf++)
//** {
//** fprintf(fout, "%f ", radial_functions_(ir, idxrf, 0));
//** }
//** fprintf(fout, "\n");
//** }
//** fclose(fout);
//** }
//** STOP();
}
/// Broadcast radial functions and AW surface derivatives from a source rank.
inline void Atom_symmetry_class::sync_radial_functions(Communicator const& comm__, int const rank__)
{
    /* don't broadcast Hamiltonian radial functions, because they are used locally */
    int nelem = static_cast<int>(radial_functions_.size(0) * radial_functions_.size(1));
    comm__.bcast(radial_functions_.at<CPU>(), nelem, rank__);
    int nderiv = static_cast<int>(aw_surface_derivatives_.size());
    comm__.bcast(aw_surface_derivatives_.at<CPU>(), nderiv, rank__);
    // TODO: sync enu to pass to Exciting / Elk
}
/// Broadcast the radial integrals from a source rank to all ranks of the communicator.
inline void Atom_symmetry_class::sync_radial_integrals(Communicator const& comm__, int const rank__)
{
    comm__.bcast(h_spherical_integrals_.at<CPU>(), static_cast<int>(h_spherical_integrals_.size()), rank__);
    comm__.bcast(o_radial_integrals_.at<CPU>(), static_cast<int>(o_radial_integrals_.size()), rank__);
    comm__.bcast(so_radial_integrals_.at<CPU>(), static_cast<int>(so_radial_integrals_.size()), rank__);
    /* o1 integrals are only allocated for the IORA treatment of valence states */
    bool const iora = (atom_type_.parameters().valence_relativity() == relativity_t::iora);
    if (iora) {
        comm__.bcast(o1_radial_integrals_.at<CPU>(), static_cast<int>(o1_radial_integrals_.size()), rank__);
    }
}
/// Broadcast the core charge density, core leakage and core eigenvalue sum.
inline void Atom_symmetry_class::sync_core_charge_density(Communicator const& comm__, int const rank__)
{
    assert(core_charge_density_.size() != 0);
    int const npt = atom_type_.radial_grid().num_points();
    comm__.bcast(core_charge_density_.data(), npt, rank__);
    comm__.bcast(&core_leakage_, 1, rank__);
    comm__.bcast(&core_eval_sum_, 1, rank__);
}
/// Generate radial overlap and SO integrals.
/** Computes the spherical-Hamiltonian radial integrals, the overlap integrals (diagonal
 *  in l), the optional o1 (IORA) integrals, and — currently disabled — the spin-orbit
 *  radial integrals. */
inline void Atom_symmetry_class::generate_radial_integrals(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_radial_integrals");
int nmtp = atom_type_.num_mt_points();
/* alpha^2/2 enters the relativistic mass; set to zero for the non-relativistic case */
double sq_alpha_half = 0.5 * std::pow(speed_of_light, -2);
if (rel__ == relativity_t::none) {
sq_alpha_half = 0;
}
h_spherical_integrals_.zero();
#pragma omp parallel default(shared)
{
/* thread-local spline used for integration */
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int i1 = 0; i1 < atom_type_.mt_radial_basis_size(); i1++) {
for (int i2 = 0; i2 < atom_type_.mt_radial_basis_size(); i2++) {
/* for spherical part of potential integrals are diagonal in l */
if (atom_type_.indexr(i1).l == atom_type_.indexr(i2).l) {
/* ll = l(l+1): centrifugal factor */
int ll = atom_type_.indexr(i1).l * (atom_type_.indexr(i1).l + 1);
for (int ir = 0; ir < nmtp; ir++) {
/* inverse relativistic mass 1/M */
double Minv = 1.0 / (1 - spherical_potential_[ir] * sq_alpha_half);
/* u_1(r) * u_2(r) */
double t0 = radial_functions_(ir, i1, 0) * radial_functions_(ir, i2, 0);
/* r*u'_1(r) * r*u'_2(r) */
double t1 = radial_functions_(ir, i1, 1) * radial_functions_(ir, i2, 1);
/* kinetic + centrifugal + potential contributions */
s[ir] = 0.5 * t1 * Minv + t0 * (0.5 * ll * Minv + spherical_potential_[ir] * std::pow(atom_type_.radial_grid(ir), 2));
}
h_spherical_integrals_(i1, i2) = s.interpolate().integrate(0) / y00;
}
}
}
}
o_radial_integrals_.zero();
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int l = 0; l <= atom_type_.indexr().lmax(); l++) {
int nrf = atom_type_.indexr().num_rf(l);
for (int order1 = 0; order1 < nrf; order1++) {
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int order2 = 0; order2 < nrf; order2++) {
int idxrf2 = atom_type_.indexr().index_by_l_order(l, order2);
if (order1 == order2) {
/* radial functions are normalized, so the diagonal overlap is exactly 1 */
o_radial_integrals_(l, order1, order2) = 1.0;
} else {
for (int ir = 0; ir < nmtp; ir++) {
s[ir] = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0);
}
o_radial_integrals_(l, order1, order2) = s.interpolate().integrate(2);
}
}
}
}
}
/* extra o1 integrals are only required for the IORA treatment */
if (atom_type_.parameters().valence_relativity() == relativity_t::iora) {
o1_radial_integrals_.zero();
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int i1 = 0; i1 < atom_type_.mt_radial_basis_size(); i1++) {
for (int i2 = 0; i2 < atom_type_.mt_radial_basis_size(); i2++) {
/* for spherical part of potential integrals are diagonal in l */
if (atom_type_.indexr(i1).l == atom_type_.indexr(i2).l) {
int ll = atom_type_.indexr(i1).l * (atom_type_.indexr(i1).l + 1);
for (int ir = 0; ir < nmtp; ir++) {
/* 1/M^2 */
double Minv = std::pow(1 - spherical_potential_[ir] * sq_alpha_half, -2);
/* u_1(r) * u_2(r) */
double t0 = radial_functions_(ir, i1, 0) * radial_functions_(ir, i2, 0);
/* r*u'_1(r) * r*u'_2(r) */
double t1 = radial_functions_(ir, i1, 1) * radial_functions_(ir, i2, 1);
s[ir] = sq_alpha_half * 0.5 * Minv * (t1 + t0 * 0.5 * ll);
}
o1_radial_integrals_(i1, i2) = s.interpolate().integrate(0);
}
}
}
}
}
/* spin-orbit integrals: currently compiled out */
if (false) // TODO: if it's slow, compute only when spin-orbit is turned on
{
double soc = std::pow(2 * speed_of_light, -2);
Spline<double> s(atom_type_.radial_grid());
Spline<double> s1(atom_type_.radial_grid());
/* effective potential without the nuclear -z/r part */
Spline<double> ve(atom_type_.radial_grid());
for (int i = 0; i < nmtp; i++) ve[i] = spherical_potential_[i] + atom_type_.zn() / atom_type_.radial_grid(i);
ve.interpolate();
so_radial_integrals_.zero();
for (int l = 0; l <= atom_type_.indexr().lmax(); l++)
{
int nrf = atom_type_.indexr().num_rf(l);
for (int order1 = 0; order1 < nrf; order1++)
{
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int order2 = 0; order2 < nrf; order2++)
{
int idxrf2 = atom_type_.indexr().index_by_l_order(l, order2);
for (int ir = 0; ir < nmtp; ir++)
{
double M = 1.0 - 2 * soc * spherical_potential_[ir];
/* first part <f| dVe / dr |f'> */
s[ir] = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0) *
soc * ve.deriv(1, ir) / pow(M, 2);
/* second part <f| d(z/r) / dr |f'> */
s1[ir] = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0) *
soc * atom_type_.zn() / pow(M, 2);
}
s.interpolate();
s1.interpolate();
so_radial_integrals_(l, order1, order2) = s.integrate(1) + s1.integrate(-1);
}
}
}
}
}
/// Print the linearization energies of AW and lo radial functions.
/** Only the energies that were found automatically (auto_enu) are reported. */
inline void Atom_symmetry_class::write_enu(runtime::pstdout& pout) const
{
    pout.printf("Atom : %s, class id : %i\n", atom_type_.symbol().c_str(), id_);

    pout.printf("augmented waves\n");
    for (int l = 0; l < num_aw_descriptors(); l++) {
        for (size_t order = 0; order < aw_descriptor(l).size(); order++) {
            auto& rsd = aw_descriptor(l)[order];
            if (rsd.auto_enu) {
                /* cast: 'order' is size_t, but the format string expects int (%i);
                   passing size_t through varargs for %i is undefined behavior on LP64 */
                pout.printf("n = %2i l = %2i order = %i enu = %12.6f\n", rsd.n, rsd.l,
                            static_cast<int>(order), rsd.enu);
            }
        }
    }

    pout.printf("local orbitals\n");
    for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
        for (size_t order = 0; order < lo_descriptor(idxlo).rsd_set.size(); order++) {
            auto& rsd = lo_descriptor(idxlo).rsd_set[order];
            if (rsd.auto_enu) {
                /* same cast as above for the size_t loop index */
                pout.printf("n = %2i l = %2i order = %i enu = %12.6f\n", rsd.n, rsd.l,
                            static_cast<int>(order), rsd.enu);
            }
        }
    }
    pout.printf("\n");
}
/// Find core states and generate core density.
/** Core states are solved on a radial grid extended beyond the muffin-tin sphere; the
 *  MT potential is continued outside by a fitted alpha/r + beta tail. The core charge
 *  found outside the MT sphere is stored as the core leakage. */
inline void Atom_symmetry_class::generate_core_charge_density(relativity_t core_rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_core_charge_density");
/* nothing to do */
if (atom_type_.num_core_electrons() == 0.0) {
return;
}
int nmtp = atom_type_.num_mt_points();
std::vector<double> free_atom_grid(nmtp);
for (int i = 0; i < nmtp; i++) {
free_atom_grid[i] = atom_type_.radial_grid(i);
}
/* extend radial grid */
double x = atom_type_.radial_grid(nmtp - 1);
double dx = atom_type_.radial_grid().dx(nmtp - 2);
while (x < 30.0 + atom_type_.zn() / 4.0) {
x += dx;
free_atom_grid.push_back(x);
/* grow the step geometrically to limit the number of extra points */
dx *= 1.025;
}
Radial_grid rgrid(free_atom_grid);
/* interpolate spherical potential inside muffin-tin */
Spline<double> svmt(atom_type_.radial_grid());
/* remove nucleus contribution from Vmt */
for (int ir = 0; ir < nmtp; ir++) {
svmt[ir] = spherical_potential_[ir] + atom_type_.zn() * atom_type_.radial_grid().x_inv(ir);
}
svmt.interpolate();
/* fit tail to alpha/r + beta */
double alpha = -(std::pow(atom_type_.mt_radius(), 2) * svmt.deriv(1, nmtp - 1) + atom_type_.zn());
double beta = svmt[nmtp - 1] - (atom_type_.zn() + alpha) / atom_type_.mt_radius();
/* cook an effective potential from muffin-tin part and a tail */
std::vector<double> veff(rgrid.num_points());
for (int ir = 0; ir < nmtp; ir++) {
veff[ir] = spherical_potential_[ir];
}
/* simple tail alpha/r + beta */
for (int ir = nmtp; ir < rgrid.num_points(); ir++) {
veff[ir] = alpha * rgrid.x_inv(ir) + beta;
}
//== /* write spherical potential */
//== std::stringstream sstr;
//== sstr << "spheric_potential_" << id_ << ".dat";
//== FILE* fout = fopen(sstr.str().c_str(), "w");
//== for (int ir = 0; ir < rgrid.num_points(); ir++)
//== {
//== fprintf(fout, "%18.10f %18.10f\n", rgrid[ir], veff[ir]);
//== }
//== fclose(fout);
//== STOP();
/* charge density */
Spline<double> rho(rgrid);
/* atomic level energies */
std::vector<double> level_energy(atom_type_.num_atomic_levels());
/* initial guess: hydrogen-like energies -Z / (2 n^2) */
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
level_energy[ist] = -1.0 * atom_type_.zn() / 2 / std::pow(double(atom_type_.atomic_level(ist).n), 2);
}
#pragma omp parallel default(shared)
{
/* thread-local density accumulator */
std::vector<double> rho_t(rho.num_points());
std::memset(&rho_t[0], 0, rho.num_points() * sizeof(double));
#pragma omp for
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
if (atom_type_.atomic_level(ist).core) {
/* solve the bound-state problem for this core level */
Bound_state bs(core_rel__, atom_type_.zn(), atom_type_.atomic_level(ist).n, atom_type_.atomic_level(ist).l,
atom_type_.atomic_level(ist).k, rgrid, veff, level_energy[ist]);
/* NOTE: this reference shadows the outer rho spline within this scope */
auto& rho = bs.rho();
for (int i = 0; i < rgrid.num_points(); i++) {
rho_t[i] += atom_type_.atomic_level(ist).occupancy * rho[i] / fourpi;
}
level_energy[ist] = bs.enu();
}
}
/* reduce the thread-local densities into the shared spline */
#pragma omp critical
for (int i = 0; i < rho.num_points(); i++) {
rho[i] += rho_t[i];
}
}
/* muffin-tin part of the core density */
for (int ir = 0; ir < atom_type_.num_mt_points(); ir++) {
core_charge_density_[ir] = rho[ir];
}
/* interpolate muffin-tin part of core density */
Spline<double> rho_mt(atom_type_.radial_grid(), core_charge_density_);
/* compute core leakage */
core_leakage_ = fourpi * (rho.interpolate().integrate(2) - rho_mt.integrate(2));
/* compute eigen-value sum of core states */
core_eval_sum_ = 0.0;
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
if (atom_type_.atomic_level(ist).core) {
core_eval_sum_ += level_energy[ist] * atom_type_.atomic_level(ist).occupancy;
}
}
}
} // namespace
#endif // __ATOM_SYMMETRY_CLASS_H__
|
omp_loop3.c | /* vim: set ts=4 sw=4: */
/* Filename : omp_loop3.c
* Description : simple OpenMP model
* Author : SunYoung Kim <sunyzero@gmail.com>
* Notes : omp_get_thread_num
*/
#include <stdio.h>
#include <omp.h>
/* entry point: run a parallel loop where each iteration prints the index
 * and the id of the thread that executed it (output order is nondeterministic) */
int main()
{
    int idx;

    /* combine the parallel region and the loop work-sharing in a single clause */
    #pragma omp parallel for
    for (idx = 0; idx < 8; idx++) {
        printf("[%d] Hello OpenMP (%d)\n", idx, omp_get_thread_num());
    }

    /* implicit barrier at the end of the parallel region */
    return 0;
}
|
GB_unaryop__ainv_int64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_int8
// op(A') function: GB_tran__ainv_int64_int8
// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
// scalar types and operator plumbing used by the generated kernels below
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// accessor for the p-th entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int64_t) (-Ax [p]) for all anz entries, in parallel.
// Auto-generated kernel; logic lives in the GB_* macros above.
GrB_Info GB_unop__ainv_int64_int8
(
int64_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast and apply the unary operator.
// The kernel body is generated by including the shared transpose template.
GrB_Info GB_tran__ainv_int64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int16_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_int8)
// op(A') function: GB (_unop_tran__identity_int16_int8)
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
// scalar types and operator plumbing used by the generated kernels below
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// accessor for the p-th entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int16_t) Ax [p] for all anz entries, in parallel.
// Auto-generated identity-cast kernel.
GrB_Info GB (_unop_apply__identity_int16_int8)
(
int16_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop index declared once, shared by both branches
int64_t p ;
if (Ab == NULL)
{
// no bitmap: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries that are not present in the bitmap
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast and apply the unary operator.
// The kernel body is generated by including the shared transpose template.
GrB_Info GB (_unop_tran__identity_int16_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <string>
#include <vector>
#include <memory>
namespace LightGBM {
/*!
* \brief Tree model
*/
class Tree {
 public:
  /*!
  * \brief Constructor
  * \param max_leaves The number of max leaves
  */
  explicit Tree(int max_leaves);
  /*!
  * \brief Constructor, from a string
  * \param str Model string
  */
  explicit Tree(const std::string& str);
  ~Tree();
  /*!
  * \brief Performing a split on tree leaves.
  * \param leaf Index of leaf to be split
  * \param feature Index of feature; the converted index after removing useless features
  * \param bin_type type of this feature, numerical or categorical
  * \param threshold Threshold(bin) of split
  * \param real_feature Index of feature, the original index on data
  * \param threshold_double Threshold on feature value
  * \param left_value Model Left child output
  * \param right_value Model Right child output
  * \param left_cnt Count of left child
  * \param right_cnt Count of right child
  * \param gain Split gain
  * \return The index of new leaf.
  */
  int Split(int leaf, int feature, BinType bin_type, uint32_t threshold, int real_feature,
            double threshold_double, double left_value,
            double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain);
  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
  /*! \brief Set the output of one leaf */
  inline void SetLeafOutput(int leaf, double output) {
    leaf_value_[leaf] = output;
  }
  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  void AddPredictionToScore(const Dataset* data,
                            data_size_t num_data,
                            double* score) const;
  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param used_data_indices Indices of used data
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  void AddPredictionToScore(const Dataset* data,
                            const data_size_t* used_data_indices,
                            data_size_t num_data, double* score) const;
  /*!
  * \brief Prediction on one record
  * \param feature_values Feature value of this record
  * \return Prediction result
  */
  inline double Predict(const double* feature_values) const;
  /*! \brief Index of the leaf a record falls into (see GetLeaf) */
  inline int PredictLeafIndex(const double* feature_values) const;
  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }
  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
  /*! \brief Get feature of specific split*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
  /*!
  * \brief Shrinkage for the tree's output
  * shrinkage rate (a.k.a learning rate) is used to tune the training process
  * \param rate The factor of shrinkage
  */
  inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] *= rate;
    }
    shrinkage_ *= rate;
  }
  /*! \brief Serialize this object to string*/
  std::string ToString();
  /*! \brief Serialize this object to json*/
  std::string ToJSON();
  /*! \brief Categorical decision: true when fval matches the threshold category */
  template<typename T>
  static bool CategoricalDecision(T fval, T threshold) {
    if (static_cast<int>(fval) == static_cast<int>(threshold)) {
      return true;
    } else {
      return false;
    }
  }
  /*! \brief Numerical decision: true when fval is no greater than the threshold */
  template<typename T>
  static bool NumericalDecision(T fval, T threshold) {
    if (fval <= threshold) {
      return true;
    } else {
      return false;
    }
  }
  /*! \brief Human-readable name of a decision type: 0 -> "no_greater", else "is" */
  static const char* GetDecisionTypeName(int8_t type) {
    if (type == 0) {
      return "no_greater";
    } else {
      return "is";
    }
  }
  // decision functions indexed by decision_type_: [0] numerical, [1] categorical
  // (inner = on bin values, outer = on raw feature values)
  static std::vector<bool(*)(uint32_t, uint32_t)> inner_decision_funs;
  static std::vector<bool(*)(double, double)> decision_funs;
 private:
  /*!
  * \brief Find leaf index of which record belongs by features
  * \param feature_values Feature value of this record
  * \return Leaf index
  */
  inline int GetLeaf(const double* feature_values) const;
  /*! \brief Serialize one node to json*/
  inline std::string NodeToJSON(int index);
  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current leaves*/
  int num_leaves_;
  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  /*! \brief Decision type, 0 for '<='(numerical feature), 1 for 'is'(categorical feature) */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<double> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief DataCount of leaves */
  std::vector<data_size_t> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<data_size_t> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  // accumulated shrinkage applied via Shrinkage()
  double shrinkage_;
  // whether any split in this tree is categorical
  bool has_categorical_;
};
inline double Tree::Predict(const double* feature_values) const {
  // A tree with a single leaf has no split nodes; its output is defined
  // as 0.  Use a double literal to match the return type (the original
  // returned the float literal 0.0f; the value is identical).
  if (num_leaves_ > 1) {
    int leaf = GetLeaf(feature_values);
    return LeafOutput(leaf);
  } else {
    return 0.0;
  }
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  // Trees with at most one leaf have no decision nodes; report leaf 0.
  if (num_leaves_ <= 1) {
    return 0;
  }
  return GetLeaf(feature_values);
}
inline int Tree::GetLeaf(const double* feature_values) const {
  // Walk from the root (node 0) down the tree.  Internal nodes have
  // non-negative indices; a child index < 0 encodes a leaf as the bitwise
  // complement (~leaf_index), so the loop exits when node goes negative.
  int node = 0;
  while (node >= 0) {
    // decision_type_[node] selects the comparison: 0 -> numerical
    // "<= threshold", 1 -> categorical "is" (see decision_funs).
    if (decision_funs[decision_type_[node]](
        feature_values[split_feature_[node]],
        threshold_[node])) {
      node = left_child_[node];
    } else {
      node = right_child_[node];
    }
  }
  return ~node;  // undo the complement to recover the leaf index
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
RCCE.h | //
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef RCCE_H
#define RCCE_H
#include <stdlib.h>
#include <stdio.h>
#define _RCCE "1.0.7 release"
// little trick to allow the application to be called "RCCE_APP" under
// OpenMP, and "main" otherwise
#ifndef _OPENMP
#define RCCE_APP main
#endif
// modify next line for BareMetal, which supports stdout, but not stderr
#define STDERR stdout
#define LOG2_LINE_SIZE 5
#define RCCE_LINE_SIZE (1<<LOG2_LINE_SIZE)
// RCCE_BUFF_SIZE_MAX is space per UE, which is half of the space per tile
#define RCCE_BUFF_SIZE_MAX (1<<13)
#define RCCE_MAXNP 48
#define RCCE_SUCCESS 0
#define RCCE_ERROR_BASE 1234321
#define RCCE_ERROR_TARGET (RCCE_ERROR_BASE + 1)
#define RCCE_ERROR_SOURCE (RCCE_ERROR_BASE + 2)
#define RCCE_ERROR_ID (RCCE_ERROR_BASE + 3)
#define RCCE_ERROR_MESSAGE_LENGTH (RCCE_ERROR_BASE + 4)
#define RCCE_ERROR_FLAG_UNDEFINED (RCCE_ERROR_BASE + 5)
#define RCCE_ERROR_NUM_UES (RCCE_ERROR_BASE + 6)
#define RCCE_ERROR_DATA_OVERLAP (RCCE_ERROR_BASE + 7)
#define RCCE_ERROR_ALIGNMENT (RCCE_ERROR_BASE + 8)
#define RCCE_ERROR_DEBUG_FLAG (RCCE_ERROR_BASE + 9)
#define RCCE_ERROR_FLAG_NOT_IN_COMM_BUFFER (RCCE_ERROR_BASE + 10)
#define RCCE_ERROR_FLAG_STATUS_UNDEFINED (RCCE_ERROR_BASE + 11)
#define RCCE_ERROR_FLAG_NOT_ALLOCATED (RCCE_ERROR_BASE + 12)
#define RCCE_ERROR_VAL_UNDEFINED (RCCE_ERROR_BASE + 13)
#define RCCE_ERROR_INVALID_ERROR_CODE (RCCE_ERROR_BASE + 14)
#define RCCE_ERROR_RPC_NOT_ALLOCATED (RCCE_ERROR_BASE + 15)
#define RCCE_ERROR_RPC_INTERNAL (RCCE_ERROR_BASE + 16)
#define RCCE_ERROR_MULTIPLE_RPC_REQUESTS (RCCE_ERROR_BASE + 17)
#define RCCE_ERROR_FDIVIDER (RCCE_ERROR_BASE + 18)
#define RCCE_ERROR_FREQUENCY_EXCEEDED (RCCE_ERROR_BASE + 19)
#define RCCE_ERROR_NO_ACTIVE_RPC_REQUEST (RCCE_ERROR_BASE + 20)
#define RCCE_ERROR_STALE_RPC_REQUEST (RCCE_ERROR_BASE + 21)
#define RCCE_ERROR_COMM_UNDEFINED (RCCE_ERROR_BASE + 22)
#define RCCE_ERROR_ILLEGAL_OP (RCCE_ERROR_BASE + 23)
#define RCCE_ERROR_ILLEGAL_TYPE (RCCE_ERROR_BASE + 24)
#define RCCE_ERROR_MALLOC (RCCE_ERROR_BASE + 25)
#define RCCE_ERROR_COMM_INITIALIZED (RCCE_ERROR_BASE + 26)
#define RCCE_ERROR_CORE_NOT_IN_HOSTFILE (RCCE_ERROR_BASE + 27)
#define RCCE_MAX_ERROR_STRING 45
#define RCCE_DEBUG_ALL 111111
#define RCCE_DEBUG_SYNCH 111444
#define RCCE_DEBUG_COMM 111555
#define RCCE_DEBUG_RPC 111666
#define RCCE_DEBUG_DEBUG 111888
#define RCCE_FLAG_SET 1
#define RCCE_FLAG_UNSET 0
#define RCCE_NUM_OPS 4
#define RCCE_OP_BASE 23232323
#define RCCE_SUM (RCCE_OP_BASE)
#define RCCE_MIN (RCCE_OP_BASE+1)
#define RCCE_MAX (RCCE_OP_BASE+2)
#define RCCE_PROD (RCCE_OP_BASE+3)
#define RCCE_TYPE_BASE 63636363
#define RCCE_INT (RCCE_TYPE_BASE)
#define RCCE_LONG (RCCE_TYPE_BASE+1)
#define RCCE_FLOAT (RCCE_TYPE_BASE+2)
#define RCCE_DOUBLE (RCCE_TYPE_BASE+3)
// MPB pointer type
typedef volatile unsigned char* t_vcharp;
#ifdef SINGLEBITFLAGS
typedef struct {
int location; /* location of bit within line (0-255) */
t_vcharp line_address; /* start of cache line containing flag */
} RCCE_FLAG;
#else
typedef volatile int *RCCE_FLAG;
#endif
typedef int RCCE_FLAG_STATUS;
// A communicator: a group of UEs (cores) that take part in collective
// operations together.
typedef struct {
    int size;                // number of UEs in this communicator
    int my_rank;             // rank of the calling UE within this communicator
    int initialized;         // nonzero once the communicator has been set up
    int member[RCCE_MAXNP];  // member UE ids, indexed by rank
    RCCE_FLAG gather;        // synchronization flag (presumably the barrier
                             // gather phase -- confirm in RCCE_barrier)
    RCCE_FLAG release;       // synchronization flag used to release waiting UEs
} RCCE_COMM;
#ifdef RC_POWER_MANAGEMENT
typedef struct{
int release;
int old_voltage_level;
int new_voltage_level;
int old_frequency_divider;
int new_frequency_divider;
long long start_cycle;
} RCCE_REQUEST;
int RCCE_power_domain(void);
int RCCE_iset_power(int, RCCE_REQUEST *, int *, int *);
int RCCE_wait_power(RCCE_REQUEST *);
int RCCE_set_frequency_divider(int, int *);
int RCCE_power_domain_master(void);
int RCCE_power_domain_size(void);
#endif
int RCCE_init(int *, char***);
int RCCE_finalize(void);
double RCCE_wtime(void);
int RCCE_ue(void);
int RCCE_num_ues(void);
#ifdef GORY
t_vcharp RCCE_malloc(size_t);
t_vcharp RCCE_malloc_request(size_t, size_t *);
void RCCE_free(t_vcharp);
int RCCE_put(t_vcharp, t_vcharp, int, int);
int RCCE_get(t_vcharp, t_vcharp, int, int);
int RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS);
int RCCE_flag_alloc(RCCE_FLAG *);
int RCCE_flag_free(RCCE_FLAG *);
int RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int);
int RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int);
int RCCE_send(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *, size_t, int);
int RCCE_recv(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *, size_t, int);
int RCCE_recv_test(char *, t_vcharp, size_t, RCCE_FLAG *, RCCE_FLAG *,
size_t, int, int *);
#else
int RCCE_send(char *, size_t, int);
int RCCE_recv(char *, size_t, int);
int RCCE_recv_test(char *, size_t, int, int *);
int RCCE_allreduce(char *, char *, int, int, int, RCCE_COMM);
int RCCE_reduce(char *, char *, int, int, int, int, RCCE_COMM);
int RCCE_bcast(char *, size_t, int, RCCE_COMM);
#endif
int RCCE_comm_split(int (*)(int, void *), void *, RCCE_COMM *);
int RCCE_comm_free(RCCE_COMM *);
int RCCE_comm_size(RCCE_COMM, int *);
int RCCE_comm_rank(RCCE_COMM, int *);
void RCCE_fence(void);
int RCCE_barrier(RCCE_COMM *);
int RCCE_error_string(int, char *, int *);
int RCCE_debug_set(int);
int RCCE_debug_unset(int);
extern RCCE_COMM RCCE_COMM_WORLD;
#ifdef RC_POWER_MANAGEMENT
extern RCCE_COMM RCCE_P_COMM;
#define RCCE_POWER_DEFAULT -99999
#endif
#ifdef _OPENMP
#pragma omp threadprivate (RCCE_COMM_WORLD)
#ifdef RC_POWER_MANAGEMENT
#pragma omp threadprivate (RCCE_P_COMM)
#endif
#endif
#endif
|
tilted.h | #ifndef batoid_tilted_h
#define batoid_tilted_h
#include "surface.h"
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
// A tilted-plane optical surface parameterized by its slopes along x and y.
// (Presumably z = x*tanx + y*tany -- confirm against the sag() implementation.)
class Tilted : public Surface {
public:
    Tilted(double tanx, double tany);
    ~Tilted();
    // Pointer to the device-side instance (used when built with BATOID_GPU).
    virtual const Surface* getDevPtr() const override;
    // Surface height z at the given (x, y).
    virtual double sag(double, double) const override;
    // Surface normal at (x, y), written into nx/ny/nz.
    virtual void normal(
        double x, double y,
        double& nx, double& ny, double& nz
    ) const override;
    // Time until a ray at (x, y, z) with velocity (vx, vy, vz) hits the
    // surface, written into dt; the return value signals whether an
    // intersection exists.
    virtual bool timeToIntersect(
        double x, double y, double z,
        double vx, double vy, double vz,
        double& dt
    ) const override;
private:
    const double _tanx, _tany;  // plane slopes along x and y
};
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif
|
hillclimb.c | #define _POSIX_C_SOURCE 200112L
#define WIN32_LEAN_AND_MEAN
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define HASHN 3 // number of multiplies in hash
#define SHIFT_RANGE 1 // radius of shift search
#define CONST_RANGE 2 // radius of const search
#define QUALITY 18 // 2^N iterations of estimate samples
#define THRESHOLD 1.95 // regenerate anything lower than this estimate
static int optind = 1;
static int opterr = 1;
static int optopt;
static char *optarg;
static int
getopt(int argc, char * const argv[], const char *optstring)
{
    /* Minimal local POSIX-style getopt replacement (the file defines its
     * own optind/opterr/optopt/optarg above for portability).  Returns the
     * option character, '?' on an unknown option, ':' or '?' on a missing
     * argument (':' when optstring begins with ':'), and -1 when the
     * argument list is exhausted or "--" is seen.  optpos tracks the
     * position inside a bundle of short options like "-abc". */
    static int optpos = 1;
    const char *arg;
    (void)argc;
    /* Reset? */
    if (optind == 0) {
        optind = 1;
        optpos = 1;
    }
    arg = argv[optind];
    if (arg && strcmp(arg, "--") == 0) {
        /* "--" terminates option parsing */
        optind++;
        return -1;
    } else if (!arg || arg[0] != '-' || !isalnum(arg[1])) {
        /* no more arguments, or not an option */
        return -1;
    } else {
        const char *opt = strchr(optstring, arg[optpos]);
        optopt = arg[optpos];
        if (!opt) {
            /* unknown option; a leading ':' in optstring suppresses the
             * diagnostic, matching POSIX behavior */
            if (opterr && *optstring != ':')
                fprintf(stderr, "%s: illegal option: %c\n", argv[0], optopt);
            return '?';
        } else if (opt[1] == ':') {
            /* option takes an argument: either attached ("-pVALUE") ... */
            if (arg[optpos + 1]) {
                optarg = (char *)arg + optpos + 1;
                optind++;
                optpos = 1;
                return optopt;
            } else if (argv[optind + 1]) {
                /* ... or as the following argv element ("-p VALUE") */
                optarg = (char *)argv[optind + 1];
                optind += 2;
                optpos = 1;
                return optopt;
            } else {
                if (opterr && *optstring != ':')
                    fprintf(stderr,
                            "%s: option requires an argument: %c\n",
                            argv[0], optopt);
                return *optstring == ':' ? ':' : '?';
            }
        } else {
            /* flag option: advance within the bundle, or to the next argv */
            if (!arg[++optpos]) {
                optind++;
                optpos = 1;
            }
            return optopt;
        }
    }
}
#if defined(__unix__)
#include <sys/time.h>
uint64_t
uepoch(void)
{
    /* Microseconds elapsed since the Unix epoch. */
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec + 1000000LL * now.tv_sec;
}
#elif defined(_WIN32)
#include <windows.h>
uint64_t
uepoch(void)
{
    /* Microseconds since the Unix epoch, on Windows.  FILETIME counts
     * 100ns ticks since 1601-01-01; dividing by 10 yields microseconds,
     * and 11644473600000000 us is the 1601 -> 1970 epoch offset. */
    FILETIME ft;
    GetSystemTimeAsFileTime(&ft);
    uint64_t tt = ft.dwHighDateTime;
    tt <<= 32;
    tt |= ft.dwLowDateTime;
    tt /=10;
    tt -= UINT64_C(11644473600000000);
    return tt;
}
#endif
static uint64_t
rand64(uint64_t s[4])
{
    /* xoshiro256**-style generator: compute the scrambled output from the
     * current state, then advance the 256-bit state in place. */
    const uint64_t mixed = s[1] * 5;
    const uint64_t result = ((mixed << 7) | (mixed >> 57)) * 9;
    const uint64_t shifted = s[1] << 17;
    s[2] ^= s[0];
    s[3] ^= s[1];
    s[1] ^= s[2];
    s[0] ^= s[3];
    s[2] ^= shifted;
    s[3] = (s[3] << 45) | (s[3] >> 19);
    return result;
}
/* One candidate hash function: HASHN xorshift-multiply rounds.
 * c[i] holds the odd 32-bit multiply constants; s[0..HASHN-1] are the
 * pre-multiply xorshift amounts and s[HASHN] is the final shift. */
struct hash {
    uint32_t c[HASHN];
    char s[HASHN + 1];
};
static void
hash_gen(struct hash *h, uint64_t rng[4])
{
    /* Draw HASHN random odd 32-bit multiply constants, and reset every
     * shift amount (including the final one) to 16. */
    int i;
    for (i = 0; i < HASHN; i++)
        h->c[i] = (uint32_t)(rand64(rng) >> 32) | 1u;
    for (i = 0; i <= HASHN; i++)
        h->s[i] = 16;
}
static int
hash_equal(const struct hash *a, const struct hash *b)
{
    /* Two hash descriptions are equal when every multiply constant and
     * every shift amount (including the final shift) matches. */
    if (a->s[HASHN] != b->s[HASHN])
        return 0;
    for (int i = 0; i < HASHN; i++)
        if (a->c[i] != b->c[i] || a->s[i] != b->s[i])
            return 0;
    return 1;
}
static void
hash_print(const struct hash *h)
{
    /* Print the pattern as "[s1 c1 s2 c2 ... sN]", the same form that
     * hash_parse() reads back. */
    int i = 0;
    putchar('[');
    while (i < HASHN) {
        printf("%2d %08lx ", h->s[i], (unsigned long)h->c[i]);
        i++;
    }
    printf("%2d]", h->s[HASHN]);
    fflush(stdout);
}
static int
hash_parse(struct hash *h, char *str)
{
    /* Parse a pattern of the form "[s c s c ... s]" as produced by
     * hash_print().  Returns 1 on success, 0 on malformed input.
     * Note: modifies str in place (uses strtok).
     *
     * Fix: the original never checked strtok()'s return value, so a
     * truncated pattern (e.g. "[5") passed NULL to strtol/strtoul,
     * which is undefined behavior.  Each token is now NULL-checked. */
    long s;
    unsigned long c;
    char *end, *tok;
    if (*str != '[')
        return 0;
    str++;
    for (int i = 0; i < HASHN; i++) {
        tok = strtok(i ? 0 : str, " ");
        if (!tok)  /* fewer tokens than expected */
            return 0;
        s = strtol(tok, &end, 10);
        if (s < 1 || s > 31 || !(*end == 0 || *end == ' '))
            return 0;
        h->s[i] = s;
        tok = strtok(0, " ");
        if (!tok)
            return 0;
        c = strtoul(tok, &end, 16);
        if (c > 0xffffffffUL || !(*end == 0 || *end == ' '))
            return 0;
        h->c[i] = c;
    }
    tok = strtok(0, "]");
    if (!tok)  /* missing final shift / closing bracket */
        return 0;
    s = strtol(tok, &end, 10);
    if (s < 1 || s > 31 || *end)
        return 0;
    h->s[HASHN] = s;
    return 1;
}
static uint32_t
hash(const struct hash *h, uint32_t x)
{
    /* Apply the HASHN xorshift-multiply rounds described by h, then the
     * final xorshift, to the 32-bit input. */
    int round = 0;
    while (round < HASHN) {
        x ^= x >> h->s[round];
        x *= h->c[round];
        round++;
    }
    return x ^ (x >> h->s[HASHN]);
}
static double
estimate_bias32(const struct hash *f, uint64_t rng[4])
{
    /* Monte-Carlo estimate of the avalanche bias of f.  For each of n
     * random inputs x, flip every input bit j and count in bins[j][k] how
     * often output bit k flips.  An ideal hash flips each output bit with
     * probability 1/2, so the bias is the RMS deviation of the observed
     * flip ratio from 1/2, scaled by 1000 (same scale as exact_bias32). */
    long n = 1L << QUALITY;
    long bins[32][32] = {{0}};
    for (long i = 0; i < n; i++) {
        uint32_t x = rand64(rng);  /* truncated to the low 32 bits */
        uint32_t h0 = hash(f, x);
        for (int j = 0; j < 32; j++) {
            uint32_t bit = UINT32_C(1) << j;
            uint32_t h1 = hash(f, x ^ bit);
            uint32_t set = h0 ^ h1;  /* output bits that flipped */
            for (int k = 0; k < 32; k++)
                bins[j][k] += (set >> k) & 1;
        }
    }
    double mean = 0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            /* deviation of the flip count from the ideal n/2 */
            double diff = (bins[j][k] - n / 2) / (n / 2.0);
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
#define EXACT_SPLIT 32 // must be power of two
static double
exact_bias32(const struct hash *f)
{
    /* Exact avalanche bias of f over all 2^32 inputs (cf. estimate_bias32).
     * The input space is split into EXACT_SPLIT contiguous ranges processed
     * in parallel; each thread accumulates into a private b[][], merged
     * into the shared bins[][] under a critical section. */
    int i; // declare here to work around Visual Studio issue
    long long bins[32][32] = {{0}};
    static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
    #pragma omp parallel for
    for (i = 0; i < EXACT_SPLIT; i++) {
        long long b[32][32] = {{0}};
        for (uint64_t x = i * range; x < (i + 1) * range; x++) {
            uint32_t h0 = hash(f, x);
            for (int j = 0; j < 32; j++) {
                uint32_t bit = UINT32_C(1) << j;
                uint32_t h1 = hash(f, x ^ bit);
                uint32_t set = h0 ^ h1;  /* output bits that flipped */
                for (int k = 0; k < 32; k++)
                    b[j][k] += (set >> k) & 1;
            }
        }
        #pragma omp critical
        for (int j = 0; j < 32; j++)
            for (int k = 0; k < 32; k++)
                bins[j][k] += b[j][k];
    }
    double mean = 0.0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            /* 2147483648 = 2^31, the ideal flip count over 2^32 inputs */
            double diff = (bins[j][k] - 2147483648L) / 2147483648.0;
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
static void
hash_gen_strict(struct hash *h, uint64_t rng[4])
{
    /* Keep drawing random patterns until one estimates at or below
     * THRESHOLD, so hill climbing starts from a reasonable candidate. */
    for (;;) {
        hash_gen(h, rng);
        if (estimate_bias32(h, rng) <= THRESHOLD)
            return;
    }
}
static uint64_t
load64(const void *buf)
{
    /* Read 8 bytes as a little-endian 64-bit value, independent of the
     * host's byte order and alignment requirements. */
    const unsigned char *p = (const unsigned char *)buf;
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}
static uint64_t
mix64(uint64_t x, uint64_t y)
{
    /* Combine two 64-bit values using independent odd multipliers, then
     * fold the high half into the low half. */
    uint64_t r = 0x2b8a130976726633 * x - 0xb28cbd28446adb17 * y;
    return r ^ (r >> 32);
}
static uint64_t
hash64(uint64_t x, uint64_t m)
{
    /* One multiply-xorshift round of a 64-bit mixer. */
    const uint64_t p = x * m;
    return p ^ (p >> 32);
}
static void
mix64x4(uint64_t x[4])
{
    /* Scramble a 256-bit seed in place: first whiten each lane with an
     * add + multiply-xorshift (distinct multiplier per lane), then run
     * four rounds where one lane is hashed and mixed into the others.
     * NOTE(review): the last round is ROUND64(3, 0, 1, 3) -- it names
     * lane 3 twice and never feeds lane 2; the pattern of the previous
     * rounds suggests ROUND64(3, 0, 1, 2) was intended.  Harmless for
     * seeding, but changing it would alter all derived seeds, so it is
     * left as-is; confirm with the author. */
    uint64_t i = 0xf81db9ba6dabee4e;
    uint64_t m = 0xb1d9e3fbc08321db;
    x[0] = hash64(x[0] + UINT64_C(0x347534cdcf0982b6), m);
    x[1] = hash64(x[1] + UINT64_C(0x975e2ee8f0f23aa8), m += i);
    x[2] = hash64(x[2] + UINT64_C(0x7baf736c6c769a0b), m += i);
    x[3] = hash64(x[3] + UINT64_C(0x884afc96accb90d9), m += i);
    /* mix lane a into lanes b, c and d, advancing the multiplier each time */
    #define ROUND64(a, b, c, d) \
        x[b] = mix64(hash64(x[a], m += i), x[b]); \
        x[c] = mix64(hash64(x[a], m += i), x[c]); \
        x[d] = mix64(hash64(x[a], m += i), x[d])
    ROUND64(0, 1, 2, 3);
    ROUND64(1, 0, 2, 3);
    ROUND64(2, 0, 1, 3);
    ROUND64(3, 0, 1, 3);
    #undef ROUND64
}
static void
rng_init(uint64_t rng[4])
{
    /* Seed the generator from cheap environmental entropy: the current
     * time plus three addresses (code, stack, heap), then scramble with
     * mix64x4 so weak inputs still yield a well-mixed state.
     * NOTE(review): casting function/object pointers to uint64_t is
     * implementation-defined and may warn on 32-bit targets; uintptr_t
     * would be more portable -- confirm intended platforms. */
    void *p = malloc(1024L * 1024);
    rng[0] = uepoch();
    rng[1] = (uint64_t)rng_init;
    rng[2] = (uint64_t)rng;
    rng[3] = (uint64_t)p;
    free(p);
    mix64x4(rng);
}
/* Modular multiplicative inverse (32-bit) */
static uint32_t
modinv32(uint32_t x)
{
uint32_t a = x;
x += x - a * x * x;
x += x - a * x * x;
x += x - a * x * x;
x += x - a * x * x;
x += x - a * x * x;
return x;
}
static void
usage(FILE *f)
{
    /* Print command-line help to the given stream (stdout for -h,
     * stderr for errors).  Fix: "-I" line said "an quit". */
    fprintf(f, "usage: hillclimb [-EhIqs] [-p INIT] [-x SEED]\n");
    fprintf(f, " -E Evaluate given pattern (-p)\n");
    fprintf(f, " -h Print this message and exit\n");
    fprintf(f, " -I Invert given pattern (-p) and quit\n");
    fprintf(f, " -p INIT Provide an initial hash function\n");
    fprintf(f, " -q Print less information (quiet)\n");
    fprintf(f, " -s Quit after finding a local minima\n");
    fprintf(f, " -x SEED Seed PRNG from a string (up to 32 bytes)\n");
}
int
main(int argc, char **argv)
{
    /* Hill-climbing driver: starting from a random (or user-supplied)
     * xorshift-multiply hash pattern, repeatedly evaluate all neighbors
     * in shift-space and constant-space and move to the best one; on a
     * local minimum either stop (-s) or restart from a fresh pattern. */
    int seeded = 0;
    uint64_t rng[4];
    struct hash cur, last = {0};
    int generate = 1;      /* no -p given: generate the initial pattern */
    int one_shot = 0;      /* -s: stop at the first local minimum */
    int quiet = 0;
    int invert = 0;
    int evaluate = 0;
    double cur_score = -1; /* < 0 means "not yet computed" */
    int option;
    while ((option = getopt(argc, argv, "EhIp:qsx:")) != -1) {
        switch (option) {
        case 'E': {
            evaluate = 1;
        } break;
        case 'h': {
            usage(stdout);
            exit(EXIT_SUCCESS);
        } break;
        case 'I': {
            invert = 1;
        } break;
        case 'p': {
            if (!hash_parse(&cur, optarg)) {
                fprintf(stderr, "hillclimb: invalid pattern: %s\n", optarg);
                exit(EXIT_FAILURE);
            }
            generate = 0;
        } break;
        case 'q': {
            quiet++;
        } break;
        case 's': {
            one_shot = 1;
        } break;
        case 'x': {
            /* Seed the PRNG deterministically from up to 32 bytes,
             * zero-padded, then scrambled by mix64x4. */
            unsigned char buf[32] = {0};
            size_t len = strlen(optarg);
            if (len > sizeof(buf)) {
                fprintf(stderr, "hillclimb: seed too long (> 32 bytes)\n");
                exit(EXIT_FAILURE);
            }
            memcpy(buf, optarg, len);
            rng[0] = load64(buf + 0);
            rng[1] = load64(buf + 8);
            rng[2] = load64(buf + 16);
            rng[3] = load64(buf + 24);
            mix64x4(rng);
            seeded = 1;
        } break;
        default:
            usage(stderr);
            exit(EXIT_FAILURE);
        }
    }
    if (invert) {
        /* -I: emit C source for the inverse of the given hash, applying
         * the inverse shifts and modular-inverse multiplies in reverse
         * order (a shift's inverse is the xor of all repeated shifts). */
        if (generate) {
            fprintf(stderr, "hillclimb: -I requires -p\n");
            exit(EXIT_FAILURE);
        }
        printf("uint32_t\nhash_r(uint32_t x)\n{\n");
        for (int i = 0; i < HASHN * 2 + 1; i++) {
            switch (i & 1) {
            case 0: {
                int s = HASHN - i / 2;
                printf(" x ^=");
                /* NOTE(review): the inner `i` deliberately(?) shadows the
                 * outer loop variable; confirm this is intended. */
                for (int i = cur.s[s]; i < 32; i += cur.s[s])
                    printf(" %sx >> %d", i == cur.s[s] ? "" : "^ ", i);
                printf(";\n");
            } break;
            case 1: {
                int c = HASHN - (i + 1) / 2;
                unsigned long inv = modinv32(cur.c[c]);
                printf(" x *= UINT32_C(0x%08lx);\n", inv);
            } break;
            }
        }
        printf(" return x;\n}\n");
        exit(EXIT_SUCCESS);
    }
    if (evaluate) {
        /* -E: print the exact bias of the given pattern and exit. */
        if (generate) {
            fprintf(stderr, "hillclimb: -E requires -p\n");
            exit(EXIT_FAILURE);
        }
        hash_print(&cur);
        printf(" = %.17g\n", exact_bias32(&cur));
        exit(EXIT_SUCCESS);
    }
    if (!seeded)
        rng_init(rng);
    if (generate)
        hash_gen_strict(&cur, rng);
    for (;;) {
        int found = 0;
        struct hash best;
        double best_score;
        if (quiet < 2)
            hash_print(&cur);
        if (cur_score < 0)
            cur_score = exact_bias32(&cur);
        if (quiet < 2)
            printf(" = %.17g\n", cur_score);
        best = cur;
        best_score = cur_score;
        /* Explore around shifts */
        for (int i = 0; i <= HASHN; i++) {
            /* In theory the shift could drift above 31 or below 1, but
             * in practice it would never get this far since these would
             * be terrible hashes.
             */
            for (int d = -SHIFT_RANGE; d <= +SHIFT_RANGE; d++) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.s[i] += d;
                /* skip the pattern we just came from */
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf(" ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        /* Explore around constants (step 2 keeps the constants odd) */
        for (int i = 0; i < HASHN; i++) {
            for (int d = -CONST_RANGE; d <= +CONST_RANGE; d += 2) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.c[i] += d;
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf(" ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        if (found) {
            /* Move to the lowest item found */
            if (quiet < 1)
                puts("CLIMB");
            last = cur;
            cur = best;
            cur_score = best_score;
        } else if (one_shot) {
            /* Hit local minima, exit */
            if (quiet < 1)
                puts("DONE");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            break;
        } else {
            /* Hit local minima, reset */
            if (quiet < 1)
                puts("RESET");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            last.s[0] = 0; // set to invalid
            hash_gen_strict(&cur, rng);
            cur_score = -1;
        }
    }
}
|
convolution_sgemm_pack1ton.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM stage of im2col convolution for RISC-V Vector: input data is packed
// one element at a time (pack1), output channels are packed `packn` per
// vector register (packton).
static void im2col_sgemm_pack1ton_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;  // number of 32-bit lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);
    // Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    const int size = bottom_im2col.w;   // number of output pixels
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;
    const float* bias = _bias;          // may be empty -> NULL
    // permute: gather all (inch * maxk) samples of one output pixel into a
    // contiguous row, so the GEMM inner loop below reads sequentially
    Mat tmp;
    tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            float* tmpptr = tmp.channel(i);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;
                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }
    // one iteration produces `packn` packed output channels for channel group p
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        int i = 0;
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i);
            const float* kptr0 = kernel.channel(p);
            int nn = inch * maxk; // inch always > 0
            // accumulator starts at the bias for this channel group, or zero
            vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
            if (bias)
            {
                _sum = vle32_v_f32m1(bias + p * packn, vl);
            }
            for (int j = 0; j < nn; j++)
            {
                // broadcast one input sample against packn kernel weights
                float val = *tmpptr++;
                vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
                _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
                kptr0 += packn;
            }
            vse32_v_f32m1(outptr0, _sum, vl);
            outptr0 += packn;
        }
    }
}
// im2col + GEMM convolution driver (pack1 input layout -> packn output layout).
static void convolution_im2col_sgemm_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;          // output pixels per channel
    const int maxk = kernel_w * kernel_h;  // samples per kernel window
    // im2col: for each input channel, write one row of `size` samples per
    // kernel tap (u, v), honoring stride and dilation
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // gap: input distance from the end of one output row's samples to
        // the start of the next output row's samples
        const int gap = w * stride_h - outw * stride_w;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];
                            sptr += stride_w;
                            ptr += 1;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_pack1ton_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
GB_unop__ainv_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fp64_fp64)
// op(A') function: GB (_unop_tran__ainv_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_fp64_fp64)
(
    double *Cx,                 // output array; Cx and Ax may be aliased
    const double *Ax,           // input array
    const int8_t *restrict Ab,  // A->b if A is bitmap, or NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Negate only the entries flagged present in the bitmap.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = -Ax [k] ;
            }
        }
    }
    else
    {
        // dense case: negate every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = -Ax [k] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // per-thread workspaces (used by the included template)
    const int64_t *restrict A_slice,  // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    // C = -A': the actual loop is supplied by the included template, which
    // expands the GB_CAST_OP / GB_GETA macros defined above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fast_gaussian_blur_template.h | // Copyright (C) 2017-2022 Basile Fraboni
// Copyright (C) 2014 Ivan Kutskir (for the original fast blur implmentation)
// All Rights Reserved
// You may use, distribute and modify this code under the
// terms of the MIT license. For further details please refer
// to : https://mit-license.org/
//
//#pragma once
#include <Arduino.h>
//!
//! \file fast_gaussian_blur_template.h
//! \author Basile Fraboni
//! \date 2017 - 2022
//!
//! \brief This contains a C++ implementation of a fast Gaussian blur algorithm in linear time.
//!
//! The image buffer is supposed to be of size w * h * c, h its height, with w its width,
//! and c its number of channels.
//! The default implementation only supports up to 4 channels images, but one can easily add support for any number of channels
//! using either specific template cases or a generic function that takes the number of channels as an explicit parameter.
//! This implementation is focused on learning and readability more than on performance.
//! The fast blur algorithm is performed with several box blur passes over an image.
//! The filter converges towards a true Gaussian blur after several passes (thanks TCL). In practice,
//! three passes are sufficient for good quality results.
//! For further details please refer to:
//! - http://blog.ivank.net/fastest-gaussian-blur.html
//! - https://www.peterkovesi.com/papers/FastGaussianSmoothing.pdf
//! - https://github.com/bfraboni/FastGaussianBlur
//!
//! **Note:** The fast gaussian blur algorithm is not accurate on image boundaries.
//! It performs a diffusion of the signal with several independent passes, each pass depending
//! on the preceding one. Some of the diffused signal is lost near borders and results in a slight
//! loss of accuracy for next pass. This problem can be solved by increasing the image support of
//! half the box kernel extent at each pass of the algorithm. The added padding would in this case
//! capture the diffusion and make the next pass accurate.
//! On contrary true Gaussian blur does not suffer this problem since the whole diffusion process
//! is performed in one pass only.
//! The extra padding is not performed in this implementation, however we provide and discuss several border
//! policies resulting in different approximations and accuracies.
//!
//!
//! \brief Enumeration that describes border policies for filters.
//!
//! For a detailed description of border policies please refer to:
//! https://en.wikipedia.org/wiki/Kernel_(image_processing)#Edge_Handling
//!
//! \todo Add support for other border policies (wrap, mirror, constant)
enum BorderPolicy {
kExtend,
kKernelCrop,
// kWrap,
// kMirror,
// kConstant
};
//!
//! \brief This function performs a single separable horizontal box blur pass.
//!
//! To complete a box blur pass we need to do this operation two times, one horizontally
//! and one vertically. Templated by buffer data type T, buffer number of channels C, and border policy P.
//!
//! \param[in] in source buffer
//! \param[in,out] out target buffer
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] r box dimension
//!
template<typename T, int C, BorderPolicy P = kKernelCrop>
void horizontal_blur(const T * in, T * out, const int w, const int h, const int r)
{
    // normalization factor for a complete box kernel of size 2r+1
    float iarr = 1.f / (r+r+1);
    // rows are independent, so they can be processed in parallel
    #pragma omp parallel for
    for(int i=0; i<h; i++)
    {
        // ti: write index, li: index of the sample leaving the sliding window
        // on the left, ri: index of the sample entering it on the right
        int ti = i*w, li = ti, ri = ti+r;
        // fv/lv: first/last pixel of the row (used by the kExtend policy),
        // val: running sum of the sliding window, one accumulator per channel
        float fv[C], lv[C], val[C];
        for(int ch = 0; ch < C; ++ch)
        {
            fv[ch] = P == kExtend ? in[ti*C+ch] : 0; // unused with kcrop policy
            lv[ch] = P == kExtend ? in[(ti+w-1)*C+ch] : 0; // unused with kcrop policy
            // kExtend pretends r+1 copies of the first pixel lie left of the border
            val[ch] = P == kExtend ? (r+1)*fv[ch] : 0;
        }
        // initial accumulation: samples [0, r) of the row
        for(int j=0; j<r; j++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += in[(ti+j)*C+ch];
        }
        // left border - filter kernel is incomplete
        // kKernelCrop divides by the actual number of covered samples (r+j+1)
        for(int j=0; j<=r; j++, ri++, ti++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += P == kExtend ? in[ri*C+ch] - fv[ch] : in[ri*C+ch];
            out[ti*C+ch] = P == kExtend ? val[ch]*iarr : val[ch]/(r+j+1);
        }
        // center of the image - filter kernel is complete
        for(int j=r+1; j<w-r; j++, ri++, ti++, li++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += in[ri*C+ch] - in[li*C+ch];
            out[ti*C+ch] = val[ch]*iarr;
        }
        // right border - filter kernel is incomplete
        // kKernelCrop divides by the actual number of covered samples (r+w-j)
        for(int j=w-r; j<w; j++, ti++, li++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += P == kExtend ? lv[ch] - in[li*C+ch] : -in[li*C+ch];
            out[ti*C+ch] = P == kExtend ? val[ch]*iarr : val[ch]/(r+w-j);
        }
    }
    // NOTE(review): the left-border loop reads up to index ti+2r of the row,
    // so this assumes 2r < w; confirm callers clamp the radius accordingly.
}
//!
//! \brief Utility template dispatcher function for horizontal_blur. Templated by buffer data type T.
//!
//! \param[in] in source buffer
//! \param[in,out] out target buffer
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] c image channels
//! \param[in] r box dimension
//!
//! Forwards to the channel-templated implementation. The channel count must be
//! a compile-time constant for that template, hence the explicit dispatch here.
template<typename T>
void horizontal_blur(const T * in, T * out, const int w, const int h, const int c, const int r)
{
    if(c == 1)      horizontal_blur<T,1>(in, out, w, h, r);
    else if(c == 2) horizontal_blur<T,2>(in, out, w, h, r);
    else if(c == 3) horizontal_blur<T,3>(in, out, w, h, r);
    else if(c == 4) horizontal_blur<T,4>(in, out, w, h, r);
    else printf("horizontal_blur over %d channels is not supported yet. Add a specific case if possible or fall back to the generic version.\n", c);
}
//!
//! \brief This function performs a 2D transposition of an image.
//!
//! The transposition is done per
//! block to reduce the number of cache misses and improve cache coherency for large image buffers.
//! Templated by buffer data type T and buffer number of channels C.
//!
//! \param[in] in source buffer
//! \param[in,out] out target buffer
//! \param[in] w image width
//! \param[in] h image height
//!
//! \brief Blocked 2D transposition of an interleaved image buffer.
//!
//! The image is processed in square tiles of roughly 256 values per row so
//! that both the reads from `in` and the writes to `out` stay cache friendly.
//!
//! \param[in]     in  source buffer (w x h, C interleaved channels)
//! \param[in,out] out target buffer (receives the h x w transposed image)
//! \param[in]     w   image width
//! \param[in]     h   image height
template<typename T, int C>
void flip_block(const T * in, T * out, const int w, const int h)
{
    constexpr int block = 256/C;
    #pragma omp parallel for collapse(2)
    for(int x= 0; x < w; x+= block)
    for(int y= 0; y < h; y+= block)
    {
        // top-left corner of the current tile in source and target images
        const T * p = in + y*w*C + x*C;
        T * q = out + y*C + x*h*C;
        // clamp the tile extent on the right / bottom borders
        // (fix: the original called unqualified `min`, which is not declared
        // here -- std::min lives in namespace std; use explicit clamping)
        const int blockx= (x+block < w ? x+block : w) - x;
        const int blocky= (y+block < h ? y+block : h) - y;
        for(int xx= 0; xx < blockx; xx++)
        {
            for(int yy= 0; yy < blocky; yy++)
            {
                // copy one pixel (all C channels)
                for(int k= 0; k < C; k++)
                    q[k]= p[k];
                p+= w*C; // next row in the source
                q+= C;   // next column in the target
            }
            p+= -blocky*w*C + C; // rewind to tile top, move one column right
            q+= -blocky*C + h*C; // rewind to tile left, move one row down
        }
    }
}
//!
//! \brief Utility template dispatcher function for flip_block. Templated by buffer data type T.
//!
//! \param[in] in source buffer
//! \param[in,out] out target buffer
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] c image channels
//!
//! Forwards to the channel-templated implementation. The channel count must be
//! a compile-time constant for that template, hence the explicit dispatch here.
template<typename T>
void flip_block(const T * in, T * out, const int w, const int h, const int c)
{
    if(c == 1)      flip_block<T,1>(in, out, w, h);
    else if(c == 2) flip_block<T,2>(in, out, w, h);
    else if(c == 3) flip_block<T,3>(in, out, w, h);
    else if(c == 4) flip_block<T,4>(in, out, w, h);
    else printf("flip_block over %d channels is not supported yet. Add a specific case if possible or fall back to the generic version.\n", c);
}
//!
//! \brief This function converts the standard deviation of
//! Gaussian blur into a box radius for each box blur pass.
//!
//! For further details please refer to :
//! - https://www.peterkovesi.com/papers/FastGaussianSmoothing.pdf
//!
//! \param[out] boxes box radii for kernel sizes of 2*boxes[i]+1
//! \param[in] sigma Gaussian standard deviation
//! \param[in] n number of box blur pass
//!
//! \brief Convert a Gaussian standard deviation into one box radius per pass.
//!
//! Chooses between two adjacent odd kernel widths (wl and wu) and how many
//! passes use each, so the n-fold box blur best matches the requested sigma.
//!
//! \param[out] boxes box radii for kernel sizes of 2*boxes[i]+1
//! \param[in]  sigma Gaussian standard deviation
//! \param[in]  n     number of box blur passes (must be > 0)
void sigma_to_box_radius(int boxes[], const float sigma, const int n)
{
    // ideal averaging filter width for the requested standard deviation
    // (fix: qualify std::sqrt -- relying on a global ::sqrt leaking from
    // <math.h> is not guaranteed in C++, and std::sqrt has a float overload)
    float wi = std::sqrt((12*sigma*sigma/n)+1);
    int wl = wi; // truncation == std::floor for positive values
    if(wl%2==0) wl--; // wl must be the largest odd width <= wi
    int wu = wl+2;    // next odd width above wi
    // ideal number of passes m that should use the smaller width wl so that
    // m passes of wl plus (n-m) passes of wu best reproduce sigma
    float mi = (12*sigma*sigma - n*wl*wl - 4*n*wl - 3*n)/(-4*wl - 4);
    int m = mi+0.5f; // avoid std::round by adding 0.5f and cast to integer type
    // convert kernel widths into box radii (kernel size = 2*radius+1)
    for(int i=0; i<n; i++)
        boxes[i] = ((i < m ? wl : wu) - 1) / 2;
}
//!
//! \brief This function performs a fast Gaussian blur. Templated by buffer data type T and number of passes N.
//!
//! Applying several times box blur tends towards a true Gaussian blur (thanks TCL). Three passes are sufficient
//! for good results. Templated by buffer data type T and number of passes N. The input buffer is also used
//! as temporary and modified during the process hence it can not be constant.
//!
//! Usually the process should alternate between horizontal and vertical passes
//! as many times as we want box blur passes. However thanks to box blur properties
//! the separable passes can be performed in any order without changing the result.
//! Hence for performance purposes the algorithm is:
//! - apply N times horizontal blur (horizontal passes)
//! - flip the image buffer (transposition)
//! - apply N times horizontal blur (vertical passes)
//! - flip the image buffer (transposition)
//!
//! We provide two versions of the function:
//! - generic N passes (in which more std::swap are used)
//! - specialized 3 passes only
//!
//! \param[in,out] in source buffer reference ptr
//! \param[in,out] out target buffer reference ptr
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] c image channels
//! \param[in] sigma Gaussian standard deviation
//!
template<typename T, unsigned int N>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma)
{
    // compute the box kernel radius to use for each of the N passes
    int boxes[N];
    sigma_to_box_radius(boxes, sigma, N);
    // perform N horizontal blur passes, ping-ponging between the two buffers
    for(int i = 0; i < N; ++i)
    {
        horizontal_blur(in, out, w, h, c, boxes[i]);
        std::swap(in, out);
    }
    // flip buffer (transpose) so vertical passes become horizontal ones
    flip_block(in, out, w, h, c);
    std::swap(in, out);
    // perform N horizontal blur passes on the flipped image (vertical passes)
    for(int i = 0; i < N; ++i)
    {
        horizontal_blur(in, out, h, w, c, boxes[i]);
        std::swap(in, out);
    }
    // flip buffer back to the original orientation
    flip_block(in, out, h, w, c);
    // NOTE(review): `in` and `out` are taken by value, so the std::swap calls
    // above only exchange the local pointer copies. Tracing the passes, the
    // final image always lands in the buffer the CALLER passed as `in`, not
    // `out`, even though the doc above says "reference ptr". The upstream
    // implementation takes `T *&` so the caller's pointers are swapped --
    // confirm the intended contract before relying on `out`.
}
//!
//! \brief Specialized 3 passes of separable fast box blur with less std::swap. Templated by buffer data type T.
//!
//! Applying several times box blur tends towards a true Gaussian blur (thanks TCL). Three passes are sufficient
//! for good results. Templated by buffer data type T and number of passes N. The input buffer is also used
//! as temporary and modified during the process hence it can not be constant.
//!
//! Usually the process should alternate between horizontal and vertical passes
//! as many times as we want box blur passes. However thanks to box blur properties
//! the separable passes can be performed in any order without changing the result.
//! Hence for performance purposes the algorithm is:
//! - apply N times horizontal blur (horizontal passes)
//! - flip the image buffer (transposition)
//! - apply N times horizontal blur (vertical passes)
//! - flip the image buffer (transposition)
//!
//! We provide two versions of the function:
//! - generic N passes (in which more std::swap are used)
//! - specialized 3 passes only
//!
//! \param[in,out] in source buffer reference ptr
//! \param[in,out] out target buffer reference ptr
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] c image channels
//! \param[in] sigma Gaussian standard deviation
//!
template<typename T>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma)
{
    // compute the box kernel radius to use for each of the 3 passes
    int boxes[3];
    sigma_to_box_radius(boxes, sigma, 3);
    // perform 3 horizontal blur passes, ping-ponging between the two buffers
    horizontal_blur(in, out, w, h, c, boxes[0]);
    horizontal_blur(out, in, w, h, c, boxes[1]);
    horizontal_blur(in, out, w, h, c, boxes[2]);
    // flip buffer (transpose) so vertical passes become horizontal ones
    flip_block(out, in, w, h, c);
    // perform 3 horizontal blur passes on the flipped image (vertical passes)
    horizontal_blur(in, out, h, w, c, boxes[0]);
    horizontal_blur(out, in, h, w, c, boxes[1]);
    horizontal_blur(in, out, h, w, c, boxes[2]);
    // flip buffer back to the original orientation
    flip_block(out, in, h, w, c);
    // swap pointers to get result in the output buffer
    std::swap(in, out);
    // NOTE(review): the swap above only exchanges the local pointer copies
    // because `in`/`out` are passed by value; the final image is in the buffer
    // the CALLER passed as `in`. The upstream implementation uses `T *&`
    // parameters so the swap is visible to the caller -- confirm the contract.
}
//!
//! \brief Utility template dispatcher function for fast_gaussian_blur. Templated by buffer data type T.
//!
//! This is the main exposed function and the one that should be used in programs.
//!
//! \todo Make border policies an argument of this function.
//!
//! \param[in,out] in source buffer reference ptr
//! \param[in,out] out target buffer reference ptr
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] c image channels
//! \param[in] sigma Gaussian standard deviation
//! \param[in] n number of passes, should be > 0
//!
//! \brief Run-time dispatcher for fast_gaussian_blur.
//!
//! Dispatches the run-time pass count to the compile-time specializations;
//! the 3-pass case uses the dedicated low-swap implementation.
//!
//! \param[in,out] in    source buffer ptr
//! \param[in,out] out   target buffer ptr
//! \param[in]     w     image width
//! \param[in]     h     image height
//! \param[in]     c     image channels
//! \param[in]     sigma Gaussian standard deviation
//! \param[in]     n     number of passes, should be > 0
template<typename T>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma, const unsigned int n)
{
    switch(n)
    {
        case 1: fast_gaussian_blur<T,1>(in, out, w, h, c, sigma); break;
        case 2: fast_gaussian_blur<T,2>(in, out, w, h, c, sigma); break;
        case 3: fast_gaussian_blur<T>(in, out, w, h, c, sigma); break; // specialized 3 passes version
        case 4: fast_gaussian_blur<T,4>(in, out, w, h, c, sigma); break;
        case 5: fast_gaussian_blur<T,5>(in, out, w, h, c, sigma); break;
        case 6: fast_gaussian_blur<T,6>(in, out, w, h, c, sigma); break;
        case 7: fast_gaussian_blur<T,7>(in, out, w, h, c, sigma); break;
        case 8: fast_gaussian_blur<T,8>(in, out, w, h, c, sigma); break;
        case 9: fast_gaussian_blur<T,9>(in, out, w, h, c, sigma); break;
        case 10: fast_gaussian_blur<T,10>(in, out, w, h, c, sigma); break;
        // fix: n is unsigned, so the matching printf conversion is %u (%d with
        // an unsigned argument is a format/argument type mismatch)
        default: printf("fast_gaussian_blur with %u passes is not supported yet. Add a specific case if possible or fall back to the generic version.\n", n); break;
    }
}
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
 public:
  /*!
  * \brief Constructor
  * \param max_leaves The number of max leaves
  * \param track_branch_features Whether to keep track of ancestors of leaf nodes
  * \param is_linear Whether the tree has linear models at each leaf
  */
  explicit Tree(int max_leaves, bool track_branch_features, bool is_linear);
  /*!
  * \brief Constructor, from a string
  * \param str Model string
  * \param used_len used count of str
  */
  Tree(const char* str, size_t* used_len);
  virtual ~Tree() noexcept = default;
  /*!
  * \brief Performing a split on tree leaves.
  * \param leaf Index of leaf to be split
  * \param feature Index of feature; the converted index after removing useless features
  * \param real_feature Index of feature, the original index on data
  * \param threshold_bin Threshold(bin) of split
  * \param threshold_double Threshold on feature value
  * \param left_value Model Left child output
  * \param right_value Model Right child output
  * \param left_cnt Count of left child
  * \param right_cnt Count of right child
  * \param left_weight Weight of left child
  * \param right_weight Weight of right child
  * \param gain Split gain
  * \param missing_type missing type
  * \param default_left default direction for missing value
  * \return The index of new leaf.
  */
  int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
            double threshold_double, double left_value, double right_value,
            int left_cnt, int right_cnt, double left_weight, double right_weight,
            float gain, MissingType missing_type, bool default_left);
  /*!
  * \brief Performing a split on tree leaves, with categorical feature
  * \param leaf Index of leaf to be split
  * \param feature Index of feature; the converted index after removing useless features
  * \param real_feature Index of feature, the original index on data
  * \param threshold_bin Threshold(bin) of split, use bitset to represent
  * \param num_threshold_bin size of threshold_bin
  * \param threshold Thresholds of real feature value, use bitset to represent
  * \param num_threshold size of threshold
  * \param left_value Model Left child output
  * \param right_value Model Right child output
  * \param left_cnt Count of left child
  * \param right_cnt Count of right child
  * \param left_weight Weight of left child
  * \param right_weight Weight of right child
  * \param gain Split gain
  * \return The index of new leaf.
  */
  int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
                       const uint32_t* threshold, int num_threshold, double left_value, double right_value,
                       int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
  /*! \brief Set the output of one leaf (rounded to zero when within kZeroThreshold) */
  inline void SetLeafOutput(int leaf, double output) {
    leaf_value_[leaf] = MaybeRoundToZero(output);
  }
  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  virtual void AddPredictionToScore(const Dataset* data,
                                    data_size_t num_data,
                                    double* score) const;
  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param used_data_indices Indices of used data
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  virtual void AddPredictionToScore(const Dataset* data,
                                    const data_size_t* used_data_indices,
                                    data_size_t num_data, double* score) const;
  /*!
  * \brief Get upper bound leaf value of this tree model
  */
  double GetUpperBoundValue() const;
  /*!
  * \brief Get lower bound leaf value of this tree model
  */
  double GetLowerBoundValue() const;
  /*!
  * \brief Prediction on one record
  * \param feature_values Feature value of this record
  * \return Prediction result
  */
  inline double Predict(const double* feature_values) const;
  /*! \brief Same as Predict, with features given as a sparse index->value map */
  inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Index of the leaf one record falls into */
  inline int PredictLeafIndex(const double* feature_values) const;
  inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Per-feature SHAP contributions for one record (see TreeSHAP below) */
  inline void PredictContrib(const double* feature_values, int num_features, double* output);
  inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values,
                                  int num_features, std::unordered_map<int, double>* output);
  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }
  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
  /*! \brief Get parent of specific leaf*/
  inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; }
  /*! \brief Get feature of specific split (original feature index)*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
  /*! \brief Get feature of specific split*/
  inline int split_feature_inner(int split_idx) const { return split_feature_inner_[split_idx]; }
  /*! \brief Get features on leaf's branch*/
  inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; }
  /*! \brief Get gain of specific split*/
  inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
  /*! \brief Get output value of an internal node (the leaf value it had before being split)*/
  inline double internal_value(int node_idx) const {
    return internal_value_[node_idx];
  }
  /*! \brief Whether a node splits on a numerical (as opposed to categorical) feature*/
  inline bool IsNumericalSplit(int node_idx) const {
    return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
  }
  inline int left_child(int node_idx) const { return left_child_[node_idx]; }
  inline int right_child(int node_idx) const { return right_child_[node_idx]; }
  inline uint32_t threshold_in_bin(int node_idx) const {
    return threshold_in_bin_[node_idx];
  }
  /*! \brief Get the number of data points that fall at or below this node (leaves are encoded as ~leaf_index)*/
  inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
  /*! \brief Get the summed weights of data points that fall at or below this node*/
  // NOTE(review): returns int while internal_weight_/leaf_weight_ hold doubles,
  // so the summed weight is truncated -- confirm whether this should return double.
  inline int data_weight(int node) const { return node >= 0 ? internal_weight_[node] : leaf_weight_[~node]; }
  /*!
  * \brief Shrinkage for the tree's output
  * shrinkage rate (a.k.a learning rate) is used to tune the training process
  * \param rate The factor of shrinkage
  */
  virtual inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    // A tree with num_leaves_ leaves has num_leaves_ - 1 internal nodes, so this
    // loop scales all internal outputs and the first num_leaves_ - 1 leaf
    // outputs in one pass; the last leaf is handled separately below.
    for (int i = 0; i < num_leaves_ - 1; ++i) {
      leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
      internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
      if (is_linear_) {
        // linear leaves scale both the constant term and every coefficient
        leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] * rate);
        for (size_t j = 0; j < leaf_coeff_[i].size(); ++j) {
          leaf_coeff_[i][j] = MaybeRoundToZero(leaf_coeff_[i][j] * rate);
        }
      }
    }
    leaf_value_[num_leaves_ - 1] =
        MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
    if (is_linear_) {
      leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] * rate);
      for (size_t j = 0; j < leaf_coeff_[num_leaves_ - 1].size(); ++j) {
        leaf_coeff_[num_leaves_ - 1][j] = MaybeRoundToZero(leaf_coeff_[num_leaves_ - 1][j] * rate);
      }
    }
    // keep track of the total shrinkage applied so far
    shrinkage_ *= rate;
  }
  /*! \brief Get the accumulated shrinkage factor applied to this tree*/
  inline double shrinkage() const { return shrinkage_; }
  /*! \brief Add a constant offset to all leaf and internal outputs (and linear constants)*/
  virtual inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    // same indexing scheme as Shrinkage: the last leaf is handled separately
    for (int i = 0; i < num_leaves_ - 1; ++i) {
      leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
      internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
    }
    leaf_value_[num_leaves_ - 1] =
        MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
    if (is_linear_) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
      for (int i = 0; i < num_leaves_ - 1; ++i) {
        leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] + val);
      }
      leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] + val);
    }
    // force to 1.0
    shrinkage_ = 1.0f;
  }
  /*! \brief Collapse this tree to a single leaf with constant output val*/
  inline void AsConstantTree(double val) {
    num_leaves_ = 1;
    shrinkage_ = 1.0f;
    leaf_value_[0] = val;
    if (is_linear_) {
      leaf_const_[0] = val;
    }
  }
  /*! \brief Serialize this object to string*/
  std::string ToString() const;
  /*! \brief Serialize this object to json*/
  std::string ToJSON() const;
  /*! \brief Serialize linear model of tree node to json*/
  std::string LinearModelToJSON(int index) const;
  /*! \brief Serialize this object to if-else statement*/
  std::string ToIfElse(int index, bool predict_leaf_index) const;
  /*! \brief Whether a value is close enough to zero to be rounded down*/
  inline static bool IsZero(double fval) {
    return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
  }
  inline static double MaybeRoundToZero(double fval) {
    return IsZero(fval) ? 0 : fval;
  }
  /*! \brief Test one flag bit (kCategoricalMask / kDefaultLeftMask) of a packed decision type*/
  inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
    return (decision_type & mask) > 0;
  }
  /*! \brief Set or clear one flag bit of a packed decision type*/
  inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
    if (input) {
      (*decision_type) |= mask;
    } else {
      (*decision_type) &= (127 - mask);
    }
  }
  /*! \brief Missing type is stored in bits 2-3 of the packed decision type*/
  inline static int8_t GetMissingType(int8_t decision_type) {
    return (decision_type >> 2) & 3;
  }
  inline static void SetMissingType(int8_t* decision_type, int8_t input) {
    // keep the two flag bits, replace bits 2-3 with the new missing type
    (*decision_type) &= 3;
    (*decision_type) |= (input << 2);
  }
  void RecomputeMaxDepth();
  /*! \brief Index that the next leaf created by Split will receive*/
  int NextLeafId() const { return num_leaves_; }
  /*! \brief Get the linear model constant term (bias) of one leaf */
  inline double LeafConst(int leaf) const { return leaf_const_[leaf]; }
  /*! \brief Get the linear model coefficients of one leaf */
  inline std::vector<double> LeafCoeffs(int leaf) const { return leaf_coeff_[leaf]; }
  /*! \brief Get the linear model features of one leaf */
  inline std::vector<int> LeafFeaturesInner(int leaf) const {return leaf_features_inner_[leaf]; }
  /*! \brief Get the linear model features of one leaf */
  inline std::vector<int> LeafFeatures(int leaf) const {return leaf_features_[leaf]; }
  /*! \brief Set the linear model coefficients on one leaf */
  inline void SetLeafCoeffs(int leaf, const std::vector<double>& output) {
    leaf_coeff_[leaf].resize(output.size());
    for (size_t i = 0; i < output.size(); ++i) {
      leaf_coeff_[leaf][i] = MaybeRoundToZero(output[i]);
    }
  }
  /*! \brief Set the linear model constant term (bias) on one leaf */
  inline void SetLeafConst(int leaf, double output) {
    leaf_const_[leaf] = MaybeRoundToZero(output);
  }
  /*! \brief Set the linear model features on one leaf */
  inline void SetLeafFeaturesInner(int leaf, const std::vector<int>& features) {
    leaf_features_inner_[leaf] = features;
  }
  /*! \brief Set the linear model features on one leaf */
  inline void SetLeafFeatures(int leaf, const std::vector<int>& features) {
    leaf_features_[leaf] = features;
  }
  inline bool is_linear() const { return is_linear_; }
#ifdef USE_CUDA_EXP
  inline bool is_cuda_tree() const { return is_cuda_tree_; }
#endif  // USE_CUDA_EXP
  inline void SetIsLinear(bool is_linear) {
    is_linear_ = is_linear;
  }

 protected:
  std::string NumericalDecisionIfElse(int node) const;
  std::string CategoricalDecisionIfElse(int node) const;
  /*! \brief Route one raw feature value through a numerical split; returns the child node index*/
  inline int NumericalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    // NaN is treated as zero unless the node explicitly handles NaN as missing
    if (std::isnan(fval) && missing_type != MissingType::NaN) {
      fval = 0.0f;
    }
    // missing values follow the node's stored default direction
    if ((missing_type == MissingType::Zero && IsZero(fval))
        || (missing_type == MissingType::NaN && std::isnan(fval))) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }
  /*! \brief Same as NumericalDecision, operating on binned values*/
  inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    // default_bin plays the role of zero, max_bin the role of NaN
    if ((missing_type == MissingType::Zero && fval == default_bin)
        || (missing_type == MissingType::NaN && fval == max_bin)) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_in_bin_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }
  /*! \brief Route one raw feature value through a categorical split; NaN and negative categories go right*/
  inline int CategoricalDecision(double fval, int node) const {
    int int_fval;
    if (std::isnan(fval)) {
      return right_child_[node];
    } else {
      int_fval = static_cast<int>(fval);
      if (int_fval < 0) {
        return right_child_[node];
      }
    }
    // for categorical nodes threshold_ stores an index into cat_boundaries_;
    // the bitset slice lists the categories that go left
    int cat_idx = static_cast<int>(threshold_[node]);
    if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
                             cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }
  /*! \brief Same as CategoricalDecision, operating on binned values*/
  inline int CategoricalDecisionInner(uint32_t fval, int node) const {
    int cat_idx = static_cast<int>(threshold_in_bin_[node]);
    if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
                             cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }
  /*! \brief Dispatch to the categorical or numerical decision based on the node's type flag*/
  inline int Decision(double fval, int node) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecision(fval, node);
    } else {
      return NumericalDecision(fval, node);
    }
  }
  inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecisionInner(fval, node);
    } else {
      return NumericalDecisionInner(fval, node, default_bin, max_bin);
    }
  }
  /*! \brief Shared bookkeeping for Split/SplitCategorical (defined below the class)*/
  inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
                    double left_weight, double right_weight, float gain);
  /*!
  * \brief Find leaf index of which record belongs by features
  * \param feature_values Feature value of this record
  * \return Leaf index
  */
  inline int GetLeaf(const double* feature_values) const;
  inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
  /*! \brief Serialize one node to json*/
  std::string NodeToJSON(int index) const;
  /*! \brief Serialize one node to if-else statement*/
  std::string NodeToIfElse(int index, bool predict_leaf_index) const;
  std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
  double ExpectedValue() const;
  /*! \brief This is used fill in leaf_depth_ after reloading a model*/
  inline void RecomputeLeafDepths(int node = 0, int depth = 0);
  /*!
  * \brief Used by TreeSHAP for data we keep about our decision path
  */
  struct PathElement {
    int feature_index;
    double zero_fraction;
    double one_fraction;
    // note that pweight is included for convenience and is not tied with the other attributes,
    // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
    double pweight;
    PathElement() {}
    PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
  };
  /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
  void TreeSHAP(const double *feature_values, double *phi,
                int node, int unique_depth,
                PathElement *parent_unique_path, double parent_zero_fraction,
                double parent_one_fraction, int parent_feature_index) const;
  void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values,
                     std::unordered_map<int, double>* phi,
                     int node, int unique_depth,
                     PathElement *parent_unique_path, double parent_zero_fraction,
                     double parent_one_fraction, int parent_feature_index) const;
  /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
  static void ExtendPath(PathElement *unique_path, int unique_depth,
                         double zero_fraction, double one_fraction, int feature_index);
  /*! \brief Undo a previous extension of the decision path for TreeSHAP*/
  static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
  /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
  static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);

  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current leaves*/
  int num_leaves_;
  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner_;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  /*! \brief Number of categorical split nodes; each owns one slice of the cat_* bitset arrays below */
  int num_cat_;
  /*! \brief Per-categorical-split offsets into cat_threshold_inner_ (bin space) */
  std::vector<int> cat_boundaries_inner_;
  /*! \brief Bitsets of bin values routed to the left child */
  std::vector<uint32_t> cat_threshold_inner_;
  /*! \brief Per-categorical-split offsets into cat_threshold_ (raw feature value space) */
  std::vector<int> cat_boundaries_;
  /*! \brief Bitsets of raw category values routed to the left child */
  std::vector<uint32_t> cat_threshold_;
  /*! \brief Store the information for categorical feature handle and missing value handle. */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<float> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief weight of leaves */
  std::vector<double> leaf_weight_;
  /*! \brief DataCount of leaves */
  std::vector<int> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief weight of non-leaf nodes */
  std::vector<double> internal_weight_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<int> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  /*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
  bool track_branch_features_;
  /*! \brief Features on leaf's branch, original index */
  std::vector<std::vector<int>> branch_features_;
  /*! \brief Accumulated shrinkage (learning rate) factor applied to this tree's outputs */
  double shrinkage_;
  /*! \brief Maximum leaf depth; may be negative until recomputed -- see RecomputeMaxDepth */
  int max_depth_;
  /*! \brief Tree has linear model at each leaf */
  bool is_linear_;
  /*! \brief coefficients of linear models on leaves */
  std::vector<std::vector<double>> leaf_coeff_;
  /*! \brief constant term (bias) of linear models on leaves */
  std::vector<double> leaf_const_;
  /* \brief features used in leaf linear models; indexing is relative to num_total_features_ */
  std::vector<std::vector<int>> leaf_features_;
  /* \brief features used in leaf linear models; indexing is relative to used_features_ */
  std::vector<std::vector<int>> leaf_features_inner_;
#ifdef USE_CUDA_EXP
  /*! \brief Marks whether this tree is a CUDATree */
  bool is_cuda_tree_;
#endif  // USE_CUDA_EXP
};
inline void Tree::Split(int leaf, int feature, int real_feature,
                        double left_value, double right_value, int left_cnt, int right_cnt,
                        double left_weight, double right_weight, float gain) {
  // A tree with k leaves has k-1 internal nodes, so the node created by this
  // split takes index num_leaves_ - 1 (num_leaves_ is the next free leaf slot).
  int new_node_idx = num_leaves_ - 1;
  // update parent info
  int parent = leaf_parent_[leaf];
  if (parent >= 0) {
    // if cur node is left child (leaves are referenced as ~leaf_index in the child arrays)
    if (left_child_[parent] == ~leaf) {
      left_child_[parent] = new_node_idx;
    } else {
      right_child_[parent] = new_node_idx;
    }
  }
  // add new node
  split_feature_inner_[new_node_idx] = feature;
  split_feature_[new_node_idx] = real_feature;
  split_gain_[new_node_idx] = gain;
  // add two new leaves: the left child reuses the split leaf's index,
  // the right child takes the next free leaf index num_leaves_
  left_child_[new_node_idx] = ~leaf;
  right_child_[new_node_idx] = ~num_leaves_;
  // update new leaves
  leaf_parent_[leaf] = new_node_idx;
  leaf_parent_[num_leaves_] = new_node_idx;
  // save current leaf value to internal node before change
  internal_weight_[new_node_idx] = left_weight + right_weight;
  internal_value_[new_node_idx] = leaf_value_[leaf];
  internal_count_[new_node_idx] = left_cnt + right_cnt;
  // NaN outputs are replaced by 0 to keep predictions finite
  leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
  leaf_weight_[leaf] = left_weight;
  leaf_count_[leaf] = left_cnt;
  leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
  leaf_weight_[num_leaves_] = right_weight;
  leaf_count_[num_leaves_] = right_cnt;
  // update leaf depth: both children sit one level below the split leaf
  leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
  leaf_depth_[leaf]++;
  if (track_branch_features_) {
    // both children inherit the split leaf's ancestor features plus the new split feature
    branch_features_[num_leaves_] = branch_features_[leaf];
    branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
    branch_features_[leaf].push_back(split_feature_[new_node_idx]);
  }
}
// Predict one record from a dense feature array. For constant-leaf trees this
// is simply the output of the leaf the record falls into; for linear trees it
// evaluates the leaf's linear model, falling back to the leaf's constant
// output if any referenced feature value is NaN.
inline double Tree::Predict(const double* feature_values) const {
  if (!is_linear_) {
    return (num_leaves_ > 1) ? LeafOutput(GetLeaf(feature_values)) : leaf_value_[0];
  }
  const int leaf = (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
  double result = leaf_const_[leaf];
  for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
    const double feat_val = feature_values[leaf_features_[leaf][i]];
    if (std::isnan(feat_val)) {
      // NaN input: the linear model is unusable, use the constant leaf output
      return LeafOutput(leaf);
    }
    result += leaf_coeff_[leaf][i] * feat_val;
  }
  return result;
}
// Predict one record from a sparse index->value map. Mirrors Predict: features
// absent from the map contribute nothing to a linear leaf's model, and a NaN
// value falls back to the leaf's constant output.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (!is_linear_) {
    return (num_leaves_ > 1) ? LeafOutput(GetLeafByMap(feature_values)) : leaf_value_[0];
  }
  const int leaf = (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
  double result = leaf_const_[leaf];
  for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
    const auto val_it = feature_values.find(leaf_features_[leaf][i]);
    if (val_it == feature_values.end()) {
      continue;  // missing feature: skip this term
    }
    if (std::isnan(val_it->second)) {
      return LeafOutput(leaf);  // NaN input: fall back to constant output
    }
    result += leaf_coeff_[leaf][i] * val_it->second;
  }
  return result;
}
// Returns the index of the leaf this dense feature row falls into.
// A single-leaf tree trivially maps everything to leaf 0.
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
// Map-based variant of PredictLeafIndex(): features come as {index -> value}.
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
}
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output) {
(*output)[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
// Recursively recomputes leaf_depth_ for the subtree rooted at node.
// Internal nodes have non-negative ids; a leaf is encoded as ~leaf_index.
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) {
    // Root call: size the depth array to the current leaf count.
    leaf_depth_.resize(num_leaves());
  }
  if (node < 0) {
    leaf_depth_[~node] = depth;
    return;
  }
  RecomputeLeafDepths(left_child_[node], depth + 1);
  RecomputeLeafDepths(right_child_[node], depth + 1);
}
// Routes one dense feature row from the root to a leaf and returns the
// leaf index.  Leaves are encoded as negative node ids (~leaf_index).
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    // Tree contains categorical splits: use the general decision routine.
    do {
      node = Decision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  } else {
    // Numerical-only tree: the cheaper numerical decision suffices.
    do {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  }
  return ~node;
}
// Map-based variant of GetLeaf(): routes a sparse {index -> value} row from
// the root to a leaf and returns the leaf index.
// A feature absent from the map is treated as 0.0f (same as the original
// count()/at() implementation).  Each node now performs a single hash
// lookup via find() instead of the two lookups (count + at) done before.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      node = Decision(it != feature_values.end() ? it->second : 0.0f, node);
    }
  } else {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      node = NumericalDecision(it != feature_values.end() ? it->second : 0.0f, node);
    }
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
omp_bug1fix.c | /******************************************************************************
* FILE: omp_bug1fix.c
* DESCRIPTION:
* This is a corrected version of the omp_bug1.c example. Corrections
* include removing all statements between the parallel for construct and
* the actual for loop, and introducing logic to preserve the ability to
* query a thread's id and print it from inside the for loop.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 5
/* Vector-add demo: each thread queries its id exactly once (guarded by the
 * firstprivate first_time flag) and then tags every element it computes
 * with that id in the printed output. */
int main (int argc, char *argv[])
{
  int i, chunk, tid;
  float a[N], b[N], c[N];
  char first_time;

  /* Some initializations */
  for (i = 0; i < N; i++)
  {
    a[i] = i * 1.0;
    b[i] = i * 1.0;
  }
  chunk = CHUNKSIZE;
  first_time = 'y';

  #pragma omp parallel for \
    shared(a,b,c,chunk) \
    private(i,tid) \
    schedule(static,chunk) \
    firstprivate(first_time)
  for (i = 0; i < N; i++)
  {
    /* Per-thread one-shot: capture the thread id on the first iteration. */
    if (first_time == 'y')
    {
      tid = omp_get_thread_num();
      first_time = 'n';
    }
    c[i] = a[i] + b[i];
    printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
  }
}
|
GB_unop__carg_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__carg_fp64_fc64)
// op(A') function: GB (_unop_tran__carg_fp64_fc64)
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = carg (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = carg (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = carg (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CARG || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = carg (Ax [p]) for all entries: apply the complex-argument (phase
// angle) operator, mapping double complex to double.  Aliasing Cx with Ax is
// safe because each position is read once before it is written.
GrB_Info GB (_unop_apply__carg_fp64_fc64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of positions to process
    int nthreads                // OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every position in Ax holds an entry.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = carg (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip positions with no entry
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = carg (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = carg (A'): transpose A, typecast, and apply the complex-argument
// operator.  All the work happens in the shared GB_unop_transpose.c
// template, driven by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__carg_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // per-task workspaces (template contract)
    const int64_t *restrict A_slice,  // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deconvolution_pack1ton_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) with fp16 storage and fp32
// arithmetic: input is 1-element packed, output is packn-element packed
// ("pack1ton").  Accumulation is widened to f32 (vfwmacc) for accuracy and
// narrowed back to f16 on store.
static void deconvolution_pack1ton_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes each).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel span once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // f32 accumulator, seeded with the per-channel bias if any.
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m2(bias_data_ptr + p * packn, vl);
                }

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map this output coordinate back to the input row
                        // that would have produced it under the forward
                        // convolution; skip taps that fall between strides
                        // or outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        const __fp16* sptr = m.row<const __fp16>(sy);

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            __fp16 val = sptr[sx];

                            int k = y * kernel_w + x;

                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);

                            // widening multiply-accumulate: f32 += f16 * f16
                            _sum = vfwmacc_vf_f32m2(_sum, val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                // Narrow f32 -> f16 and store one packed output pixel.
                vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl);
            }

            outptr += outw * packn;
        }
    }
}
// Deconvolution (transposed convolution), fp16 storage AND fp16 arithmetic
// ("fp16sa"): same structure as the fp16s variant, but the accumulator stays
// in f16 (vfmacc) and the bias is already fp16.  Faster, lower precision.
static void deconvolution_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes each).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel span once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // f16 accumulator, seeded with the per-channel bias if any.
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle16_v_f16m1(bias_data_ptr + p * packn, vl);
                }

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map this output coordinate back to the input row
                        // that would have produced it; skip taps that fall
                        // between strides or outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        const __fp16* sptr = m.row<const __fp16>(sy);

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            __fp16 val = sptr[sx];

                            int k = y * kernel_w + x;

                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);

                            // fused multiply-accumulate, all in f16
                            _sum = vfmacc_vf_f16m1(_sum, val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                // Store one packed output pixel (packn lanes).
                vse16_v_f16m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
|
GB_binop__isge_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_uint64
// A.*B function (eWiseMult): GB_AemultB__isge_uint64
// A*D function (colscale): GB_AxD__isge_uint64
// D*A function (rowscale): GB_DxB__isge_uint64
// C+=B function (dense accum): GB_Cdense_accumB__isge_uint64
// C+=b function (dense accum): GB_Cdense_accumb__isge_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_uint64
// C=scalar+B GB_bind1st__isge_uint64
// C=scalar+B' GB_bind1st_tran__isge_uint64
// C=A+scalar GB_bind2nd__isge_uint64
// C=A'+scalar GB_bind2nd_tran__isge_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, all three matrices dense.  This kernel is not generated for the
// ISGE operator (the op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV,
// or RDIV), so the surrounding #if 0 compiles this stub out entirely.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the ISGE operator, all three matrices dense, no accumulator:
// C(i,j) = (A(i,j) >= B(i,j)).  The loop lives in the shared dense template.
GrB_Info GB_Cdense_ewise3_noaccum__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using the
// ISGE operator.  The slice arrays partition B's entries across ntasks
// tasks for the template (per the GB_ek_slice contract).
GrB_Info GB_Cdense_accumB__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C using the ISGE
// operator, i.e. C(i,j) = (C(i,j) >= b) for every entry.
// Note: the original generated code had a second, unreachable
// "return (GrB_SUCCESS)" after the brace-enclosed block; it has been
// removed (the inner return already ends the function).
GrB_Info GB_Cdense_accumb__isge_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,   // the scalar b, typecast to uint64_t
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the ISGE
// operator.  The template reads A and D and writes the result into Cx.
GrB_Info GB_AxD__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // destination values array, typed for the template below
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the ISGE
// operator.  The template reads D and B and writes the result into Cx.
GrB_Info GB_DxB__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // destination values array, typed for the template below
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the ISGE operator.  The union of the
// patterns of A and B is computed by the shared add template; the C_to_*
// arrays and TaskList describe the precomputed task partition.
GrB_Info GB_AaddB__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,          // optional mask (may be NULL)
    const bool Mask_struct,      // if true, use only the mask's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the ISGE operator.  The
// intersection of the patterns of A and B is computed by the shared emult
// template; the C_to_* arrays and TaskList describe the task partition.
GrB_Info GB_AemultB__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,          // optional mask (may be NULL)
    const bool Mask_struct,      // if true, use only the mask's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for all p: bind the scalar x as the FIRST argument
// of the ISGE operator and apply it across every entry of B.
GrB_Info GB_bind1st__isge_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *out = (uint64_t *) Cx_output ;
    uint64_t first = (*((uint64_t *) x_input)) ;
    uint64_t *in = (uint64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = (first >= in [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for all p: bind the scalar y as the SECOND argument
// of the ISGE operator and apply it across every entry of A.
GrB_Info GB_bind2nd__isge_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *out = (uint64_t *) Cx_output ;
    uint64_t *in = (uint64_t *) Ax_input ;
    uint64_t second = (*((uint64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = (in [k] >= second) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x >= aij) with the scalar x
// bound as the first argument.  The GB_CAST_OP macro redefined just above
// this function injects the operator into the shared transpose template.
GrB_Info GB_bind1st_tran__isge_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro in this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply z = (aij >= y) with the scalar y
// bound as the second argument.  The GB_CAST_OP macro redefined just above
// this function injects the operator into the shared transpose template.
GrB_Info GB_bind2nd_tran__isge_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sections.c | #include <stdio.h>
#include <omp.h>
/* Prints which thread executes this parallel section.
 * Fix: the printf format string was split across physical source lines
 * without any continuation, which is a syntax error in C; the literal is
 * now a single string (the runtime text is otherwise unchanged). */
void funcA() {
    printf("En funcA: esta sección la ejecuta el thread %d\n",
           omp_get_thread_num());
}
/* Prints which thread executes this parallel section.
 * Fix: the printf format string was split across physical source lines
 * without any continuation, which is a syntax error in C; the literal is
 * now a single string (the runtime text is otherwise unchanged). */
void funcB() {
    printf("En funcB: esta sección la ejecuta el thread %d\n",
           omp_get_thread_num());
}
/* Runs funcA and funcB as two independent OpenMP sections (each section is
 * executed once, by whichever thread picks it up).
 * Fix: the implicit-int declaration `main()` is invalid in C99 and later;
 * declared as `int main()` with an explicit return value. */
int main() {
    #pragma omp parallel sections
    {
        #pragma omp section
        funcA();
        #pragma omp section
        funcB();
    }
    return 0;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireImageColormap() allocates (or resizes) the image colormap and
  initializes it to a linear gray ramp.  Returns MagickTrue on success, or
  MagickFalse when allocation fails (colors is reset to 0 and the image
  reverts to DirectClass).
  Fix: the gray-ramp divisor now uses the clamped image->colors instead of
  the raw colors argument — with colors == 0, the size_t expression
  colors-1 wrapped around to SIZE_MAX.  For colors >= 1 the result is
  byte-for-byte identical to the previous behavior.
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->colors=MagickMax(colors,1);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    /* image->colors is clamped to >= 1 above, so this cannot underflow. */
    pixel=(double) (i*(QuantumRange/MagickMax(image->colors-1,1)));
    GetPixelInfo(image,image->colormap+i);
    image->colormap[i].alpha_trait=BlendPixelTrait;
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=OpaqueAlpha;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychodelic effect.
%
% WARNING: this assumes an images colormap is in a well know and defined
% order. Currently Imagemagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CycleColormapImage() shifts every pixel's colormap index by `displace`
  positions, wrapping around the colormap.  DirectClass images are first
  converted to a palette so a colormap exists.
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Ensure the image has a colormap to cycle. */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    ssize_t
      index;

    /* continue (not break): OpenMP loops may not break out early. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* C's % can yield a negative result; re-wrap into [0, colors). */
      index=(ssize_t) (GetPixelIndex(image,q)+displace) % image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort comparator ordering PixelInfo entries by DECREASING intensity.
  Fix: the previous implementation returned the difference of two (int)
  casts of the intensities, which both truncates fractional intensities
  and can overflow int for large quantum depths; an explicit three-way
  comparison of the double intensities is always safe and preserves the
  descending order.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity_1,
    intensity_2;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity_1=GetPixelInfoIntensity((const Image *) NULL,color_1);
  intensity_2=GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity_2 < intensity_1)
    return(-1);
  if (intensity_2 > intensity_1)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  SortColormapByIntensity() sorts the colormap of a PseudoClass image by
  decreasing intensity and remaps every pixel index accordingly.
*/
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Only PseudoClass images have a colormap to sort. */
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries: each entry's original position
    is stashed in its alpha field so the permutation survives the qsort.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing color popularity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order:
    pixels[] maps old colormap index -> new sorted position.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    /* Remap each pixel's index through the permutation table. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
GB_unop__lgamma_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lgamma_fp32_fp32)
// op(A') function: GB (_unop_tran__lgamma_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = lgammaf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgammaf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = lgammaf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = lgammaf (Ax [k]) for every (present) entry of A.
// Cx and Ax may be aliased; the bitmap Ab selects which entries exist.
GrB_Info GB (_unop_apply__lgamma_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap bit is set are computed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                float z = Ax [k] ;
                Cx [k] = lgammaf (z) ;
            }
        }
    }
    else
    {
        // dense case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            float z = Ax [k] ;
            Cx [k] = lgammaf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying lgammaf to each entry, writing into C.
// The actual loop body lives in GB_unop_transpose.c, which is textually
// included here and driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__lgamma_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace offsets for the bucket transpose
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel; parameterized by the GB_CAST_OP macro
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hsrp_fmt_plug.c | /*
* Cracker for MD5 authentication in HSRP, HSRPv2, VRRP, and GLBP.
* http://www.rfc-editor.org/rfc/rfc1828.txt
*
* This is dedicated to Darya. You inspire me.
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* optimized Feb 2016, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hsrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hsrp);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tuned on core i7 4-core HT
// 2048 - 8850k 6679k
// 4096 - 10642k 7278k
// 8192 - 10489k 7532k
// 16k - 10413k 7694k
// 32k - 12111k 7803k ** this value chosen
// 64k - 12420k 6523k
// 128k - 12220k 6741k
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768
#endif
#endif
#endif
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL            "hsrp"
#define FORMAT_NAME             "\"MD5 authentication\" HSRP, HSRPv2, VRRP, GLBP"
#define FORMAT_TAG              "$hsrp$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        55 // Must fit in a single MD5 block
#define BINARY_SIZE             16 // raw MD5 digest size
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_SIZE               sizeof(struct custom_salt)
// On-the-wire salt size; the valid() check that used it is currently
// commented out, so it is informational only.
#define REAL_SALT_SIZE          50
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
// Self-test vectors: "$hsrp$<hex packet data>$<hex MD5>" plus the plaintext.
static struct fmt_tests tests[] = {
	{"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"},
	{"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
	{"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
	/* HSRPv2 hashes */
	{"$hsrp$0128020006040001aabbcc000a000000006400000bb8000027100a000064000000000000000000000000041c010000000a00000a0000000000000000000000000000000000000000$642fedafe1f374bd2fdd8f1ba81d87a2", "password"},
	{"$hsrp$0128020006040001aabbcc001400000000c800000bb8000027100a000064000000000000000000000000041c010000000a0000140000000000000000000000000000000000000000$0481257f0fe583b275f03a48e88de72f", "password12345"},
	{NULL}
};
// One full MD5 limb (64 bytes) per key; we build the padded first MD5
// block directly inside this buffer (see crypt_all).
static char (*saved_key)[64];
// Cached MD5 state after compressing the key's first 64-byte block;
// reused across salts until the keys change.
static MD5_CTX (*saved_ctx);
// saved_len: per-key plaintext lengths; dirty: set by set_key, tells
// crypt_all the cached contexts must be rebuilt.
static int *saved_len, dirty;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
	int length;
	unsigned char salt[2048]; // be safe ;)
} *cur_salt;
/*
 * One-time format setup: scale the key batch for OpenMP and allocate the
 * per-key buffers (saved_key/saved_len/crypt_out/saved_ctx).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* BUG FIX: omp_get_num_threads() outside a parallel region always
	   returns 1, so the batch was never scaled by the thread count.
	   omp_get_max_threads() reports the number of threads that will
	   actually be used. */
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_ctx));
}
/* Release everything allocated by init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(saved_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/*
 * Accept only well-formed "$hsrp$<hex salt>$<hex md5>" strings.
 * Returns 1 if the ciphertext can be handled by this format, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	/* the tag is optional here; p ends up at the start of the salt hex */
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	/* q -> last '$', which separates salt from the MD5 hex */
	q = strrchr(ciphertext, '$');
	if (!q || q+1==p)
		return 0;
	q = q + 1;
	// if ((q - p - 1) > REAL_SALT_SIZE * 2)
	//	return 0;
	/* the hash part must be exactly 32 lowercase hex digits and nothing else */
	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;
	/* the salt part (everything up to the '$') must be all hex too */
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;
	/* salt must fit into custom_salt.salt once decoded from hex */
	if (q-p > (sizeof(cur_salt->salt)-1)*2)
		return 0;
	return 1;
}
/*
 * Decode the hex salt portion of the ciphertext into a custom_salt.
 * Returns a pointer to a static buffer (the caller copies it).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	const char *hex = ciphertext;
	int i, nbytes;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(hex, FORMAT_TAG, TAG_LENGTH))
		hex += TAG_LENGTH;
	/* number of raw bytes = hex digits before the trailing '$', halved */
	nbytes = (strrchr(hex, '$') - hex) / 2;
	for (i = 0; i < nbytes; i++, hex += 2)
		cs.salt[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		             atoi16[ARCH_INDEX(hex[1])];
	cs.length = nbytes;
	return &cs;
}
/*
 * Decode the 32 hex digits after the last '$' into the raw 16-byte MD5.
 * Returns a pointer to a static, ARCH_WORD-aligned buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy; /* forces alignment for the word-wise compares */
	} buf;
	const char *hex = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		buf.c[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		           atoi16[ARCH_INDEX(hex[1])];
	return buf.c;
}
/* Make the given salt current; called once per salt before crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
// this place would normally contain "print_hex" but I do not want to piss of magnum (yet again)
#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)

/*
 * Hash all queued keys against the current salt.
 *
 * Optimization: the first 64-byte MD5 block fed to the hash is the key
 * followed by a 0x80 byte, zero padding, and the bit length in word 14 —
 * i.e. it depends only on the key. Its compressed state is cached in
 * saved_ctx[] and rebuilt only when set_key marked the batch dirty, so
 * per salt we only hash the salt data plus the trailing key.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	/* without OpenMP this block runs once with index 0; MAX_KEYS_PER_CRYPT
	   is 1 in that build, so that covers the whole batch */
	{
		MD5_CTX ctx;
		int len = saved_len[index];

		if (dirty) {
			// we use the saved_key buffer in-line.
			unsigned int *block = (unsigned int*)saved_key[index];

			MD5_Init(&saved_ctx[index]);
			// set bit (the 0x80 terminator of MD5 padding)
			saved_key[index][len] = 0x80;
			// bit length of the key goes in word 14 of the block
			block[14] = len << 3;
#if (ARCH_LITTLE_ENDIAN==0)
			block[14] = JOHNSWAP(block[14]);
#endif
			MD5_Update(&saved_ctx[index], (unsigned char*)block, 64);
			// clear the bit, so that get_key returns proper key.
			saved_key[index][len] = 0;
		}
		/* resume from the cached key-only state */
		memcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));
		// data
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		// key (again)
		MD5_Update(&ctx, saved_key[index], len);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	dirty = 0;
	return count;
}
/*
 * Fast scan: compare only the first 32-bit word of each computed hash with
 * the candidate binary. Without OpenMP, MAX_KEYS_PER_CRYPT is 1 and init()
 * performs no scaling, so checking index 0 alone covers the whole batch.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}
/* Full 16-byte comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification needed beyond cmp_one(); always a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Store a candidate key. The tail left over from a longer previous key is
 * zeroed so the buffer stays a clean, zero-padded MD5 block, and the batch
 * is marked dirty so crypt_all() rebuilds the cached contexts.
 */
static void hsrp_set_key(char *key, int index)
{
	int prev_len = saved_len[index];
	int new_len = strlen(key);

	if (prev_len > new_len)
		memset(saved_key[index] + new_len, 0, prev_len - new_len);
	memcpy(saved_key[index], key, new_len + 1); /* include the NUL, as strcpy did */
	saved_len[index] = new_len;
	dirty = 1;
}
/* Return the stored plaintext; crypt_all() restores the NUL it overwrote. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring the functions above into John's format API. */
struct fmt_main fmt_hsrp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		hsrp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) have a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set has two indices with distance of 12: 999 +12 = 1011.
So there is loop carried dependence.
However, having loop carried dependence does not mean data races will always happen.
The iterations with loop carried dependence must be scheduled to
different threads in order for data races to happen.
In this example, we use schedule(static,1) to increase the chance that
the dependent loop iterations will be scheduled to different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
/* change original 921 to 923 = 911+12 */
/* 180 precomputed indices into base[]; the set contains pairs that differ
   by exactly 12 (e.g. 911 and 923), which is the alias offset between the
   xa1 and xa2 views in main(), creating the loop-carried dependence. */
int indexSet[180] = {521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 923, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013};
/*
 * DataRaceBench kernel: xa1 and xa2 alias the same allocation with an
 * offset of 12 elements, and indexSet contains index pairs 12 apart, so
 * the second loop has a loop-carried dependence by construction.
 * Do NOT "fix" this file; it exists to exercise race detectors.
 */
int main(int argc, char * argv[])
{
	/* max index value is 2013. +12 to obtain a valid xa2[idx] after xa1+12. */
	/* +1 to ensure a reference like base[2015] is within the bound. */
	double * base = (double * )malloc(sizeof (double)*((2013+12)+1));
	double * xa1 = base;
	double * xa2 = xa1+12; /* second view of base, shifted by 12 elements */
	int i;
	int _ret_val_0;
	if (base==0)
	{
		printf("Error in malloc(). Aborting ...\n");
		_ret_val_0=1;
		return _ret_val_0;
	}
	/* initialize segments touched by indexSet */
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for
	for (i=521; i<=2025; ++ i)
	{
		base[i]=(0.5*i);
	}
	/* default static even scheduling may not trigger data race, using static,1 instead. */
	/* NOTE(review): the header comment mentions schedule(static,1), but this
	   loop carries no omp pragma in this (Cetus-processed) version, so it
	   runs serially here — confirm against the original benchmark source. */
	#pragma loop name main#1
	for (i=0; i<180; ++ i)
	{
		int idx = indexSet[i];
		xa1[idx]+=(1.0+i);   /* xa1[idx] is base[idx] ...            */
		xa2[idx]+=(3.0+i);   /* ... while xa2[idx] is base[idx+12]   */
	}
	printf("x1[999]=%lf xa2[1285]=%lf\n", xa1[999], xa2[1285]);
	free(base);
	_ret_val_0=0;
	return _ret_val_0;
}
|
FBGemmFPTest.h | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <random>
#include <gtest/gtest.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "./TestUtils.h"
#include "bench/AlignedVec.h"
#include "bench/BenchUtils.h"
#include "fbgemm/FbgemmPackMatrixB.h"
#include "src/RefImplementations.h"
#ifdef USE_IACA
#include "iacaMarks.h"
#endif
namespace fbgemm {
/*
 * @brief Shared test fixture for the reduced-precision GEMM tests.
 *
 * The class template parameter T is the packed element type of B
 * (e.g. float16). The gtest value parameter is the pair of
 * (transpose-of-A, transpose-of-B) operations applied to the GEMM;
 * the original comment saying the template parameter is the transpose
 * pair was misleading.
 */
template <typename T>
class FBGemmFPTest
    : public testing::TestWithParam<
          std::pair<fbgemm::matrix_op_t, fbgemm::matrix_op_t>> {
 protected:
  /// @brief Draw 10 random problem shapes {m, n, k},
  /// with m in [1, 256] and n, k in [1, 1024].
  std::vector<std::vector<int>> GenShapes() const {
    std::vector<std::vector<int>> shapes;
    std::random_device r;
    std::default_random_engine generator(r());
    std::uniform_int_distribution<int> dm(1, 256);
    std::uniform_int_distribution<int> dnk(1, 1024);
    for (int i = 0; i < 10; i++) {
      int m = dm(generator);
      int n = dnk(generator);
      int k = dnk(generator);
      shapes.push_back({m, n, k});
    }
    return shapes;
  }

  /// @brief End-to-end check: pack B, run the FBGEMM kernel across all
  /// available threads and compare the result against the reference sgemm.
  void TestRun() {
    float alpha = 1.f, beta = 0.f;
    matrix_op_t atrans, btrans;
    std::tie(atrans, btrans) = GetParam();
    for (auto s : GenShapes()) {
      const int m = s[0];
      const int n = s[1];
      const int k = s[2];
      LogShape(m, n, k, atrans, btrans);

      aligned_vector<float> A, B, C;
      InitInputs(m, n, k, A, B, C);
      aligned_vector<float> A_ref(A), B_ref(B), C_ref(C);

      // Gold via reference sgemm
      ReferenceGemm(atrans, btrans, m, n, k, A_ref, B_ref, C_ref);

      PackedGemmMatrixB<T> Bp(btrans, k, n, alpha, B.data());
      FbgemmCompute(atrans, m, A, Bp, beta, C);

      CheckResults(m, n, C_ref, C);
    }
  }

  /// @brief Same as TestRun(), but additionally unpacks the packed B,
  /// verifies it round-trips back to the original matrix, re-packs it,
  /// and only then runs the kernel.
  void UnpackTestRun() {
    float alpha = 1.f, beta = 0.f;
    matrix_op_t atrans, btrans;
    std::tie(atrans, btrans) = GetParam();
    for (auto s : GenShapes()) {
      const int m = s[0];
      const int n = s[1];
      const int k = s[2];
      LogShape(m, n, k, atrans, btrans);

      aligned_vector<float> A, B, C;
      InitInputs(m, n, k, A, B, C);
      aligned_vector<float> A_ref(A), B_ref(B), C_ref(C);

      // Gold via reference sgemm
      ReferenceGemm(atrans, btrans, m, n, k, A_ref, B_ref, C_ref);

      // fbgemm fp16
      PackedGemmMatrixB<T> Bp(btrans, k, n, alpha, B.data());
      EXPECT_TRUE(Bp.packed());

      // Test unpack: the unpacked buffer must equal the source B.
      aligned_vector<T> tmp(Bp.matSize());
      memcpy(tmp.data(), Bp.pmat(), Bp.matSize() * sizeof(T));
      Bp.unpackFromSrc(btrans, tmp.data());
      EXPECT_FALSE(Bp.packed());
      memcpy(tmp.data(), Bp.pmat(), Bp.matSize() * sizeof(T));
      for (int i = 0; i < k; ++i) {
        for (int j = 0; j < n; ++j) {
          EXPECT_EQ(
              sizeof(T) == sizeof(float16) ? cpu_half2float(tmp[i * n + j])
                                           : tmp[i * n + j],
              B[i * n + j]);
        }
      }

      // Pack it back and run the kernel on the round-tripped matrix.
      Bp.packFromSrc(btrans, tmp.data());
      EXPECT_TRUE(Bp.packed());
      FbgemmCompute(atrans, m, A, Bp, beta, C);

      CheckResults(m, n, C_ref, C);
    }
  }

 private:
  /// Report the shape (and any transposes) being exercised to stderr.
  static void LogShape(
      int m, int n, int k, matrix_op_t atrans, matrix_op_t btrans) {
    std::cerr << "m = " << m << " n = " << n << " k = " << k;
    if (atrans == matrix_op_t::Transpose) {
      std::cerr << " A_transposed";
    }
    if (btrans == matrix_op_t::Transpose) {
      std::cerr << " B_transposed";
    }
    std::cerr << std::endl;
  }

  /// Fill A (m*k) and B (k*n) with small random integers in [0, 4], and C
  /// (m*n) with NaN so any unwritten output is caught by the comparison.
  static void InitInputs(
      int m, int n, int k,
      aligned_vector<float>& A, aligned_vector<float>& B,
      aligned_vector<float>& C) {
    aligned_vector<int> Aint(m * k);
    aligned_vector<int> Bint(k * n);
    randFill(Aint, 0, 4);
    randFill(Bint, 0, 4);
    A.assign(Aint.begin(), Aint.end());
    B.assign(Bint.begin(), Bint.end());
    C.assign(m * n, NAN);
  }

  /// Gold result via the reference sgemm (alpha = 1, beta = 0).
  static void ReferenceGemm(
      matrix_op_t atrans, matrix_op_t btrans, int m, int n, int k,
      aligned_vector<float>& A_ref, aligned_vector<float>& B_ref,
      aligned_vector<float>& C_ref) {
    cblas_sgemm_ref(
        atrans,
        btrans,
        m,
        n,
        k,
        1.0f,
        A_ref.data(),
        atrans == matrix_op_t::Transpose ? m : k,
        B_ref.data(),
        btrans == matrix_op_t::Transpose ? k : n,
        0.0f,
        C_ref.data(),
        n);
  }

  /// Run the FBGEMM kernel, sharing the work across OpenMP threads when
  /// OpenMP is enabled (a single thread otherwise).
  static void FbgemmCompute(
      matrix_op_t atrans, int m, aligned_vector<float>& A,
      PackedGemmMatrixB<T>& Bp, float beta, aligned_vector<float>& C) {
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
      int num_threads = fbgemm_get_num_threads();
      int tid = fbgemm_get_thread_num();
      cblas_gemm_compute(
          atrans, m, A.data(), Bp, beta, C.data(), tid, num_threads);
    }
  }

  /// Element-wise exact comparison against the reference result.
  static void CheckResults(
      int m, int n,
      const aligned_vector<float>& C_ref, const aligned_vector<float>& C) {
    for (int i = 0; i < m; ++i) {
      for (int j = 0; j < n; ++j) {
        float expected = C_ref[i * n + j];
        float actual = C[i * n + j];
        EXPECT_EQ(actual, expected)
            << "GEMM results differ at (" << i << ", " << j << "). ref "
            << expected << " FBGemm " << actual;
      }
    }
  }
};
} // namespace fbgemm
|
pzlanhe.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)
/***************************************************************************//**
* Parallel tile calculation of max, one, infinity or Frobenius matrix norm
* for a Hermitian matrix.
******************************************************************************/
/*
 * Two-phase computation: phase 1 launches one task per stored tile that
 * writes a per-tile partial result into `work`; after a taskwait, phase 2
 * reduces those partials into the scalar *value. Only the triangle selected
 * by `uplo` is stored; diagonal tiles use the Hermitian (zlanhe/zhessq)
 * kernels, off-diagonal tiles the general (zlange/zgessq) ones.
 */
void plasma_pzlanhe(plasma_enum_t norm, plasma_enum_t uplo,
                    plasma_desc_t A, double *work, double *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    switch (norm) {
    // Declarations before the first case label are shared by all branches.
    double stub;        // sink for per-tile values that are not needed
    double *workspace;
    double *scale;
    double *sumsq;
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // Phase 1: per-tile max into work, laid out as an A.mt-by-A.nt
        // matrix with leading dimension A.mt (only one triangle is filled).
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zlange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zlange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            // Diagonal tile is Hermitian: use the zlanhe kernel.
            plasma_core_omp_zlanhe(PlasmaMaxNorm, uplo,
                                   mvam,
                                   A(m, m), ldam,
                                   &stub, &work[A.mt*m+m],
                                   sequence, request);
        }
        // Phase 2: reduce the triangle of per-tile maxima to a scalar.
        #pragma omp taskwait
        plasma_core_omp_dlansy(PlasmaMaxNorm, uplo,
                               A.nt,
                               work, A.mt,
                               &stub, value,
                               sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        // For a Hermitian matrix the one norm equals the infinity norm,
        // so both cases share this branch. Each off-diagonal tile (m, n)
        // contributes twice: its column sums for block column n and its
        // row sums for block column m (the mirrored tile), accumulated in
        // `work` viewed as an A.n-by-A.mt array with column stride A.n.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zlange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_zlange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zlange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_zlange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            plasma_core_omp_zlanhe_aux(PlasmaOneNorm, uplo,
                                       mvam,
                                       A(m, m), ldam,
                                       &work[A.n*m+m*A.nb],
                                       sequence, request);
        }
        // Phase 2: max of the accumulated column sums; the reduction
        // scratch space lives right after the partial sums in work.
        #pragma omp taskwait
        workspace = work + A.mt*A.n;
        plasma_core_omp_dlange(PlasmaInfNorm,
                               A.n, A.mt,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        // work holds one (scale, sumsq) pair per tile: scales first,
        // then the sums of squares, each an A.mt*A.nt array.
        scale = work;
        sumsq = work + A.mt*A.nt;
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*n+m], &sumsq[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_zgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*m+n], &sumsq[A.mt*m+n],
                                           sequence, request);
                }
            }
            plasma_core_omp_zhessq(uplo,
                                   mvam,
                                   A(m, m), ldam,
                                   &scale[A.mt*m+m], &sumsq[A.mt*m+m],
                                   sequence, request);
        }
        // Phase 2: combine all (scale, sumsq) pairs into the final norm.
        #pragma omp taskwait
        plasma_core_omp_dsyssq_aux(A.mt, A.nt,
                                   scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    }
}
|
GB_unop__acos_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fc32_fc32)
// op(A') function: GB (_unop_tran__acos_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = cacosf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacosf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = cacosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = cacosf (Ax [k]) for every (present) entry of A.
// Cx and Ax may be aliased; the bitmap Ab selects which entries exist.
GrB_Info GB (_unop_apply__acos_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap bit is set are computed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                GxB_FC32_t z = Ax [k] ;
                Cx [k] = cacosf (z) ;
            }
        }
    }
    else
    {
        // dense case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC32_t z = Ax [k] ;
            Cx [k] = cacosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying cacosf to each entry, writing into C.
// The actual loop body lives in GB_unop_transpose.c, which is textually
// included here and driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__acos_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace offsets for the bucket transpose
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel; parameterized by the GB_CAST_OP macro
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GoL_p.c | /******************************************************
************* Conway's game of life ******************
******************************************************
Usage: ./exec ArraySize TimeSteps
Compile with -DOUTPUT to print output in output.gif
(You will need ImageMagick for that - Install with
sudo apt-get install imagemagick)
WARNING: Do not print output for large array sizes!
or multiple time steps!
******************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#define FINALIZE "\
convert -delay 20 out*.pgm output.gif\n\
rm *pgm\n\
"
int ** allocate_array(int N);
void free_array(int ** array, int N);
void init_random(int ** array1, int ** array2, int N);
void print_to_pgm( int ** array, int N, int t );
/*
 * Simulate T generations of Conway's Game of Life on an N x N grid.
 * The border cells stay 0 (dead); only the interior is updated. Each
 * generation reads `previous` and writes `current`, then the two buffers
 * are swapped, so the inner loops carry no dependences and can be
 * parallelized with OpenMP.
 */
int main (int argc, char * argv[]) {
	int N;                        // array dimension (grid is N x N)
	int T;                        // number of time steps to simulate
	int ** current, ** previous;  // arrays - one for current timestep, one for previous timestep
	int ** swap;                  // array pointer for the O(1) buffer exchange
	int t, i, j, nbrs;            // helper variables (nbrs = live-neighbor count)
	double time;                  // variables for timing
	struct timeval ts,tf;

	/*Read input arguments*/
	if ( argc != 3 ) {
		fprintf(stderr, "Usage: ./exec ArraySize TimeSteps\n");
		exit(-1);
	}
	else {
		N = atoi(argv[1]);
		T = atoi(argv[2]);
	}

	/*Allocate and initialize matrices*/
	current = allocate_array(N);       //allocate array for current time step
	previous = allocate_array(N);      //allocate array for previous time step
	init_random(previous, current, N); //initialize both arrays with the same pattern

#ifdef OUTPUT
	print_to_pgm(previous, N, 0);
#endif

	/*Game of Life*/
	gettimeofday(&ts,NULL);
	for ( t = 0 ; t < T ; t++ ) {
		/* Use OpenMP parallel for in order to parallelize i and j loops.
		   Iterations are independent: each cell reads only `previous`
		   and writes only its own entry of `current`. */
#pragma omp parallel for shared(N, previous, current) private(i, j, nbrs)
		for ( i = 1 ; i < N-1 ; i++ )
			for ( j = 1 ; j < N-1 ; j++ ) {
				/* sum of the 8 neighbors */
				nbrs = previous[i+1][j+1] + previous[i+1][j] + previous[i+1][j-1] \
				     + previous[i][j-1] + previous[i][j+1] \
				     + previous[i-1][j-1] + previous[i-1][j] + previous[i-1][j+1];
				/* birth on 3 neighbors; survival when cell+neighbors == 3
				   (i.e. a live cell with exactly 2 neighbors) */
				if ( nbrs == 3 || ( previous[i][j]+nbrs ==3 ) )
					current[i][j]=1;
				else
					current[i][j]=0;
			}
#ifdef OUTPUT
		print_to_pgm(current, N, t+1);
#endif
		//Swap current array with previous array
		swap=current;
		current=previous;
		previous=swap;
	}
	gettimeofday(&tf,NULL);
	time=(tf.tv_sec-ts.tv_sec)+(tf.tv_usec-ts.tv_usec)*0.000001;

	free_array(current, N);
	free_array(previous, N);
	printf("GameOfLife: Size %d Steps %d Time %lf\n", N, T, time);
#ifdef OUTPUT
	system(FINALIZE);
#endif
}
/*
 * Allocate an N x N integer array (as N row pointers) with every cell
 * zero-initialized. Exits the program on allocation failure — the
 * original version dereferenced NULL on out-of-memory.
 */
int ** allocate_array(int N) {
	int ** array;
	int i;

	array = (int **) malloc(N * sizeof(int*));
	if ( array == NULL ) {
		fprintf(stderr, "allocate_array: out of memory\n");
		exit(-1);
	}
	for ( i = 0; i < N ; i++ ) {
		/* calloc zero-fills, replacing the separate clearing loop */
		array[i] = (int *) calloc(N, sizeof(int));
		if ( array[i] == NULL ) {
			fprintf(stderr, "allocate_array: out of memory\n");
			exit(-1);
		}
	}
	return array;
}
/* Release an N-row array previously produced by allocate_array(). */
void free_array(int ** array, int N) {
	int row;

	for ( row = N - 1 ; row >= 0 ; row-- )
		free(array[row]);
	free(array);
}
/*
 * Seed ~N*N/10 random live cells into BOTH grids (they must start equal,
 * since main() ping-pongs between them). Only interior cells [1, N-2] are
 * touched; duplicate picks simply re-set the same cell. Uses rand()
 * unseeded, so runs are reproducible. The unused locals x, y were removed.
 */
void init_random(int ** array1, int ** array2, int N) {
	int i, pos;

	for ( i = 0 ; i < (N * N)/10 ; i++ ) {
		pos = rand() % ((N-2)*(N-2));
		array1[pos%(N-2)+1][pos/(N-2)+1] = 1;
		array2[pos%(N-2)+1][pos/(N-2)+1] = 1;
	}
}
/*
 * Dump the grid as binary PGM "out<t>.pgm" (maxval 1: live = 1, dead = 0).
 * Fixes: snprintf into a stack buffer replaces sprintf into an unchecked
 * heap allocation, and a failed fopen() is now reported instead of
 * dereferencing NULL.
 */
void print_to_pgm(int ** array, int N, int t) {
	int i,j;
	char name[32];
	FILE * f;

	/* snprintf cannot overflow the buffer, unlike the previous sprintf */
	snprintf(name, sizeof(name), "out%d.pgm", t);
	f = fopen(name,"wb");
	if ( f == NULL ) {
		fprintf(stderr, "print_to_pgm: cannot open %s for writing\n", name);
		return;
	}
	fprintf(f, "P5\n%d %d 1\n", N,N);
	for ( i = 0; i < N ; i++ )
		for ( j = 0; j < N ; j++)
			if ( array[i][j]==1 )
				fputc(1,f);
			else
				fputc(0,f);
	fclose(f);
}
|
vdtactivelist.h | #ifndef _VDT_ACTIVE_LIST_
#define _VDT_ACTIVE_LIST_
#define FALSE 0
#define TRUE 1
#ifndef NULL
#define NULL 0
#endif
#include <stdio.h>
#include <stdlib.h>
// ptrDataType should be a pointer.
// Intrusive doubly-linked list over user-supplied nodes. ptrDataType must
// be a pointer to a node type exposing m_pPrev/m_pNext members; the list
// is bracketed by two sentinel nodes handed in through InitList().
template <class ptrDataType>
class CActiveList
{
public :
	ptrDataType m_pStart, m_pEnd;  // sentinel nodes (set by InitList)
	int m_Size;                    // number of user nodes between the sentinels

	// iterator pointer (cursor for the built-in iteration methods below)
	ptrDataType m_pCurrentNode;

	// Mark
	ptrDataType m_pMark; // store one node for user-defined purpose.

	inline CActiveList (void);
	inline ~CActiveList (void);

	// list setup and mutation
	inline void InitList (ptrDataType pStart, ptrDataType pEnd);
	inline int Delete (ptrDataType pNode);
	inline void Clear(bool deleteNodes = false);
	inline void Add (ptrDataType pNode);
	inline void ForceAdd (ptrDataType pNode);
	inline void AddNext (ptrDataType pPivotNode, ptrDataType pNode);
	inline void AddBefore (ptrDataType pPivotNode, ptrDataType pNode);
	inline void AddatEnd (ptrDataType pNode);

	// accessors
	inline ptrDataType Head (void);
	inline ptrDataType End (void);
	inline int IsEmpty (void);
	inline int Size (void);

	// operation with Mark
	inline int DeleteWithMark (ptrDataType pNode);
	inline void SetMark (ptrDataType pNode);
	inline ptrDataType GetMark (void);

	// simple iterator.
	inline void InitIteration (void);
	inline void InitIteration (ptrDataType pNode);
	inline void SetCurrent (ptrDataType pNode);
	inline int IsEnd (void);
	inline ptrDataType GetCurrent (void);
	inline void Advance (void);
	inline void BackAdvance (void);
};
// Default constructor: leaves the sentinels unset; the caller must
// invoke InitList before using any other operation.
template <class ptrDataType>
inline CActiveList <ptrDataType>::CActiveList (void)
{
m_pStart = m_pEnd = NULL;
//InitList ();
/*
m_pStart = new ptrDataType;
m_pEnd = new ptrDataType;
InitList (m_pStart, m_pEnd);
*/
}
// Destructor: releases the two sentinel nodes only (payload nodes are
// never freed here; see Clear(true) for that).
// NOTE(review): InitList receives the sentinels from the caller, so
// deleting them here assumes ownership transfers to the list --
// confirm call sites do not also delete (or stack-allocate) them.
template <class ptrDataType>
inline CActiveList <ptrDataType>::~CActiveList (void)
{
if (m_pStart != NULL) {
delete m_pStart;
m_pStart = NULL;
}
if (m_pEnd != NULL) {
delete m_pEnd;
m_pEnd = NULL;
}
}
// To use this class, first create the Start and End sentinel nodes,
// then pass their pointers to this function.
// Install the two caller-allocated sentinels and reset the list to
// empty.  Must run before any other operation (the other methods
// dereference m_pStart/m_pEnd unchecked).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitList (ptrDataType pStart, ptrDataType pEnd)
{
m_pStart = pStart;
m_pEnd = pEnd;
// make a double list.
m_pStart->m_pPrev = NULL;
m_pStart->m_pNext = m_pEnd;
m_pEnd->m_pPrev = m_pStart;
m_pEnd->m_pNext = NULL;
m_Size = 0;
m_pCurrentNode = 0;
m_pMark = 0;
}
// Non-zero (TRUE) when no payload node sits between the two sentinels.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::IsEmpty (void)
{
return (m_pStart->m_pNext == m_pEnd) ? TRUE : FALSE;
}
// Unlink pNode from the list.  Returns FALSE if the node is not
// currently linked (both link fields NULL identify a detached node).
// If the iterator sits on pNode it is stepped back one node so a
// subsequent Advance() continues correctly.  The unlinked node's
// link fields are NULLed so Add() will accept it again.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::Delete (ptrDataType pNode)
{
if (pNode->m_pNext == NULL || pNode->m_pPrev == NULL) // if this isn't an active one.
return FALSE;
if (pNode == m_pCurrentNode)
SetCurrent (m_pCurrentNode->m_pPrev);
pNode->m_pPrev->m_pNext = pNode->m_pNext;
pNode->m_pNext->m_pPrev = pNode->m_pPrev;
pNode->m_pPrev = NULL;
pNode->m_pNext = NULL;
m_Size--;
return TRUE;
}
// Empty the list; when deleteNodes is true the payload nodes are also
// delete'd (sentinels are kept either way).
// NOTE(review): with deleteNodes == false the removed nodes keep their
// stale m_pNext/m_pPrev pointers, so Add() will still treat them as
// "already inserted"; m_pCurrentNode and m_pMark are also left
// dangling -- confirm callers reset these themselves.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Clear (bool deleteNodes)
{
m_Size = 0;
if (deleteNodes)
{
ptrDataType cur, next;
cur = m_pStart->m_pNext;
// walk payload nodes until the end sentinel (or NULL, pre-InitList)
while (0 != cur && m_pEnd != cur)
{
next = cur->m_pNext;
delete cur;
cur = next;
}
}
// relink the sentinels to each other: list is now empty
m_pEnd->m_pPrev = m_pStart;
m_pStart->m_pNext = m_pEnd;
}
// If the node being deleted is the Mark, the Mark is advanced to the next node.
// Same unlink as Delete(), but maintains m_pMark instead of the
// iterator: if pNode is the Mark, the Mark moves to the next node.
// NOTE(review): unlike Delete(), m_pCurrentNode is NOT adjusted here,
// so it can be left pointing at the removed node -- confirm intended.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::DeleteWithMark (ptrDataType pNode)
{
if (pNode->m_pNext == NULL || pNode->m_pPrev == NULL) // temporary solution.
return FALSE;
if (pNode == m_pMark) { // user-purpose.
m_pMark = m_pMark->m_pNext;
}
pNode->m_pPrev->m_pNext = pNode->m_pNext;
pNode->m_pNext->m_pPrev = pNode->m_pPrev;
pNode->m_pPrev = NULL;
pNode->m_pNext = NULL;
m_Size--;
return TRUE; // it means it delete element.
}
// Remember pNode for user-defined purposes (see DeleteWithMark).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::SetMark (ptrDataType pNode)
{
m_pMark = pNode;
}
// Return the node stored by SetMark (possibly advanced by
// DeleteWithMark, or 0 if never set since InitList).
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::GetMark (void)
{
return m_pMark;
}
// Push pNode at the front of the list (right after the start
// sentinel).  A node whose m_pNext is non-NULL is assumed to be
// already linked and is silently ignored -- so nodes must have their
// link fields zeroed before their first insertion.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Add (ptrDataType pNode)
{
if (pNode->m_pNext != NULL) // already inserted in list
return;
// add node after m_Start, which is a root node
pNode->m_pNext = m_pStart->m_pNext;
pNode->m_pPrev = m_pStart;
pNode->m_pNext->m_pPrev = pNode;
m_pStart->m_pNext = pNode;
m_Size++;
}
// Move-to-front: unlink pNode if already in a list, then push it at
// the front.  The whole operation runs inside an OpenMP critical
// section when compiled with _USE_OPENMP (otherwise the braces are a
// plain scope).
// NOTE(review): the critical section is unnamed, so it serializes
// against every other unnamed critical in the program -- confirm
// whether a named critical was intended.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::ForceAdd (ptrDataType pNode)
{
#ifdef _USE_OPENMP
#pragma omp critical
#endif
{
if (pNode->m_pNext != NULL) { // already inserted in list
Delete (pNode);
}
// add node after m_Start, which is a root node
pNode->m_pNext = m_pStart->m_pNext;
pNode->m_pPrev = m_pStart;
pNode->m_pNext->m_pPrev = pNode;
m_pStart->m_pNext = pNode;
m_Size++;
}
}
// Push pNode at the back of the list (right before the end sentinel).
// Silently ignored if the node already appears linked (m_pNext set).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddatEnd (ptrDataType pNode)
{
if (pNode->m_pNext != NULL) // already inserted in list
return;
// add node before m_pEnd, which is a root node
pNode->m_pNext = m_pEnd;
pNode->m_pPrev = m_pEnd->m_pPrev;
m_pEnd->m_pPrev->m_pNext = pNode;
m_pEnd->m_pPrev = pNode;
m_Size++;
}
// Insert pNode immediately after pPivotNode (pivot must be linked,
// or be the start sentinel).  Ignored if pNode already looks linked.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddNext (ptrDataType pPivotNode, ptrDataType pNode)
{
if (pNode->m_pNext != NULL) { // already inserted in list
// printf ("To check if it might be unnecessary code.\n");
// exit (-1);
return;
}
// add node after m_pPivotNode, which is a root node
pNode->m_pNext = pPivotNode->m_pNext;
pNode->m_pPrev = pPivotNode;
pNode->m_pNext->m_pPrev = pNode;
pPivotNode->m_pNext = pNode;
m_Size++;
}
// Insert pNode immediately before pPivotNode (pivot must be linked,
// or be the end sentinel).  Ignored if pNode already looks linked.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::AddBefore (ptrDataType pPivotNode, ptrDataType pNode)
{
if (pNode->m_pNext != NULL) { // already inserted in list
// printf ("To check if it might be unnecessary code.\n");
// exit (-1);
return;
}
// add node before m_pPivotNode
//
pNode->m_pNext = pPivotNode;
pNode->m_pPrev = pPivotNode->m_pPrev;
pPivotNode->m_pPrev->m_pNext = pNode;
pPivotNode->m_pPrev = pNode;
m_Size++;
}
// First payload node; equals the end sentinel when the list is empty.
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::Head (void)
{
return m_pStart->m_pNext;
}
// Number of payload nodes currently linked.
template <class ptrDataType>
inline int CActiveList <ptrDataType>::Size (void)
{
return m_Size;
}
// Start iteration at the head of the list; iterate with
// IsEnd()/GetCurrent()/Advance().
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitIteration (void)
{
ptrDataType pRootNode = Head ();
//SetCurrent (pRootNode);
// above code produce message if list is empty.
m_pCurrentNode = pRootNode;
}
// Start iteration from an arbitrary node instead of the head.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::InitIteration (ptrDataType pNode)
{
ptrDataType pRootNode = pNode;
//SetCurrent (pRootNode);
// above code produce message if list is empty.
m_pCurrentNode = pRootNode;
}
// Set the iterator to pNode; in DEBUG_MODE builds a detached node
// (m_pNext == NULL) aborts the program.
template <class ptrDataType>
inline void CActiveList <ptrDataType>::SetCurrent (ptrDataType pNode)
{
#ifdef DEBUG_MODE
if (pNode->m_pNext == NULL) {
printf ("Error : Invalid Current Node\n");
exit (-1);
}
#endif
m_pCurrentNode = pNode;
}
// Non-zero once the iterator has walked past the last payload node
// (i.e. it now points at the end sentinel).
template <class ptrDataType>
inline int CActiveList <ptrDataType>::IsEnd (void)
{
return (m_pCurrentNode == m_pEnd) ? TRUE : FALSE;
}
// Node the iterator currently points at.
template <class ptrDataType>
inline ptrDataType CActiveList <ptrDataType>::GetCurrent (void)
{
return m_pCurrentNode;
}
// Step the iterator forward one node (no end-of-list check).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::Advance (void)
{
m_pCurrentNode = m_pCurrentNode->m_pNext;
}
// Step the iterator backward one node (no start-of-list check).
template <class ptrDataType>
inline void CActiveList <ptrDataType>::BackAdvance (void)
{
m_pCurrentNode = m_pCurrentNode->m_pPrev;
}
#endif
|
GB_compiler.h | //------------------------------------------------------------------------------
// GB_compiler.h: handle compiler variations
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#ifndef GB_COMPILER_H
#define GB_COMPILER_H
//------------------------------------------------------------------------------
// determine which compiler is in use
//------------------------------------------------------------------------------
#if defined ( __NVCC__ )
// NVIDIA nvcc compiler
#define GB_COMPILER_NVCC 1
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR __CUDACC_VER_MAJOR__
#define GB_COMPILER_MINOR __CUDACC_VER_MINOR__
#define GB_COMPILER_SUB __CUDACC_VER_BUILD__
#define GB_COMPILER_NAME "nvcc"
#elif defined ( __INTEL_CLANG_COMPILER )
// Intel icx compiler, 2022.0.0 based on clang/llvm 14.0.0
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 1
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR __INTEL_CLANG_COMPILER
#define GB_COMPILER_MINOR 0
#define GB_COMPILER_SUB 0
#define GB_COMPILER_NAME __VERSION__
#elif defined ( __INTEL_COMPILER )
// Intel icc compiler: 2021.5.0 uses "gcc 7.5 mode"
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 1
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR __INTEL_COMPILER
#define GB_COMPILER_MINOR __INTEL_COMPILER_UPDATE
#define GB_COMPILER_SUB 0
#define GB_COMPILER_NAME __VERSION__
#elif defined ( __clang__ )
// clang
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 1
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR __clang_major__
#define GB_COMPILER_MINOR __clang_minor__
#define GB_COMPILER_SUB __clang_patchlevel__
#define GB_COMPILER_NAME "clang " __clang_version__
#elif defined ( __xlC__ )
// xlc
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 1
#define GB_COMPILER_MAJOR ( __xlC__ / 256 )
#define GB_COMPILER_MINOR ( __xlC__ - 256 * GB_COMPILER_MAJOR)
#define GB_COMPILER_SUB 0
#define GB_COMPILER_NAME "IBM xlc " GB_XSTR (__xlC__)
#elif defined ( __GNUC__ )
// gcc
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 1
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR __GNUC__
#define GB_COMPILER_MINOR __GNUC_MINOR__
#define GB_COMPILER_SUB __GNUC_PATCHLEVEL__
#define GB_COMPILER_NAME "GNU gcc " GB_XSTR (__GNUC__) "." \
GB_XSTR (__GNUC_MINOR__) "." GB_XSTR (__GNUC_PATCHLEVEL__)
#elif defined ( _MSC_VER )
// Microsoft Visual Studio
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 1
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR ( _MSC_VER / 100 )
#define GB_COMPILER_MINOR ( _MSC_VER - 100 * GB_COMPILER_MAJOR)
#define GB_COMPILER_SUB 0
#define GB_COMPILER_NAME "Microsoft Visual Studio " GB_XSTR (_MSC_VER)
#else
// other compiler
#define GB_COMPILER_NVCC 0
#define GB_COMPILER_ICX 0
#define GB_COMPILER_ICC 0
#define GB_COMPILER_CLANG 0
#define GB_COMPILER_GCC 0
#define GB_COMPILER_MSC 0
#define GB_COMPILER_XLC 0
#define GB_COMPILER_MAJOR 0
#define GB_COMPILER_MINOR 0
#define GB_COMPILER_SUB 0
#define GB_COMPILER_NAME "other C compiler"
#endif
//------------------------------------------------------------------------------
// malloc.h: required include file for Microsoft Visual Studio
//------------------------------------------------------------------------------
#if GB_COMPILER_MSC
#include <malloc.h>
#endif
//------------------------------------------------------------------------------
// OpenMP pragmas and tasks
//------------------------------------------------------------------------------
// GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the
// compiler:
#if GB_COMPILER_MSC
// MS Visual Studio is not ANSI C11 compliant, and uses __pragma:
#define GB_PRAGMA(x) __pragma (x)
// no #pragma omp simd is available in MS Visual Studio
#define GB_PRAGMA_SIMD
#define GB_PRAGMA_SIMD_REDUCTION(op,s)
#else
// ANSI C11 compilers use _Pragma:
#define GB_PRAGMA(x) _Pragma (#x)
// create two kinds of SIMD pragmas:
// GB_PRAGMA_SIMD becomes "#pragma omp simd"
// GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes
// "#pragma omp simd reduction(+:cij)"
#define GB_PRAGMA_SIMD GB_PRAGMA (omp simd)
#define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s))
#endif
//------------------------------------------------------------------------------
// variable-length arrays
//------------------------------------------------------------------------------
// If variable-length arrays are not supported, user-defined types are limited
// in size to 128 bytes or less. Many of the type-generic routines allocate
// workspace for a single scalar of variable size, using a statement:
//
// GB_void aij [xsize] ;
//
// To support compilers without variable-length arrays (ANSI C95 or earlier), this is used:
//
// GB_void aij [GB_VLA(xsize)] ;
//
// GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed
// size of 128, in which case user-defined types
// are limited to a max of 128 bytes.
#if GB_COMPILER_NVCC
// NVIDIA nvcc compiler for host or device code
#define GB_HAS_VLA 1
#elif GB_COMPILER_MSC
// Microsoft Visual Studio does not support variable-length arrays.
#define GB_HAS_VLA 0
#elif defined ( __cplusplus )
#define GB_HAS_VLA 1
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 and later
#define GB_HAS_VLA 1
#else
// ANSI C95 and earlier
#define GB_HAS_VLA 0
#endif
#ifdef PGI_COMPILER_BUG
// If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is
// enabled for a bug in the PGI compiler. The compiler does not correctly
// handle automatic arrays of variable size.
#undef GB_HAS_VLA
#define GB_HAS_VLA 0
#endif
#if ( GB_HAS_VLA )
// variable-length arrays are allowed
#define GB_VLA(s) s
#else
// variable-length arrays are not allowed
#define GB_VLA_MAXSIZE 128
#define GB_VLA(s) GB_VLA_MAXSIZE
#endif
//------------------------------------------------------------------------------
// AVX2 and AVX512F support for the x86_64 architecture
//------------------------------------------------------------------------------
// gcc 7.5.0 cannot compile code with __attribute__ ((target ("avx512f"))), or
// avx2 (it triggers a bug in the compiler), but those targets are fine with
// gcc 9.3.0 or later. It might be OK on gcc 8.x but I haven't tested this.
#if GBX86
#if GB_COMPILER_GCC
#if __GNUC__ >= 9
// enable avx512f on gcc 9.x and later
#define GB_COMPILER_SUPPORTS_AVX512F 1
#define GB_COMPILER_SUPPORTS_AVX2 1
#else
// disable avx2 and avx512f on gcc 8.x and earlier
#define GB_COMPILER_SUPPORTS_AVX512F 0
#define GB_COMPILER_SUPPORTS_AVX2 0
#endif
#elif GB_COMPILER_ICX || GB_COMPILER_ICC || GB_COMPILER_CLANG
// all these compilers can handle AVX512F and AVX2 on x86
#define GB_COMPILER_SUPPORTS_AVX512F 1
#define GB_COMPILER_SUPPORTS_AVX2 1
#else
// unsure if xlc can handle AVX, but it is not likely to be used on
// the x86 anyway. cpu_features is disabled for MS Visual Studio.
#define GB_COMPILER_SUPPORTS_AVX512F 0
#define GB_COMPILER_SUPPORTS_AVX2 0
#endif
#else
// non-X86_64 architecture
#define GB_COMPILER_SUPPORTS_AVX512F 0
#define GB_COMPILER_SUPPORTS_AVX2 0
#endif
// prefix for function with target avx512f
#if GB_COMPILER_SUPPORTS_AVX512F
#if (defined (_WIN64) || defined (_WIN32)) && \
(GB_COMPILER_ICC || GB_COMPILER_ICX)
// the Intel compilers on Windows support this feature:
#define GB_TARGET_AVX512F __declspec (target ("avx512f"))
#else
#define GB_TARGET_AVX512F __attribute__ ((target ("avx512f")))
#endif
#else
#define GB_TARGET_AVX512F
#endif
// prefix for function with target avx2
#if GB_COMPILER_SUPPORTS_AVX2
#if (defined (_WIN64) || defined (_WIN32)) && \
(GB_COMPILER_ICC || GB_COMPILER_ICX)
// the Intel compilers on Windows support this feature:
#define GB_TARGET_AVX2 __declspec (target ("avx2"))
#else
#define GB_TARGET_AVX2 __attribute__ ((target ("avx2")))
#endif
#else
#define GB_TARGET_AVX2
#endif
#endif
|
knn_utils.h | /*
*
* Copyright (c) 2019, BIOVAULT (Leiden University Medical Center, Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BIOVAULT ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef KNN_H
#define KNN_H
#include <atomic>
#include <exception>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
namespace hdi{
namespace dr{
// Available approximate k-nearest-neighbour backends.
enum knn_library
{
KNN_HNSW = 0,
KNN_ANNOY = 1
};
// Distance metrics; support varies per backend (query with
// supported_knn_library_distance_metrics below).
enum knn_distance_metric
{
KNN_METRIC_EUCLIDEAN = 0,
KNN_METRIC_COSINE = 1,
KNN_METRIC_INNER_PRODUCT = 2,
KNN_METRIC_MANHATTAN = 3,
KNN_METRIC_HAMMING = 4,
KNN_METRIC_DOT = 5
};
//! Returns the number of supported KNN libraries.
//! This function should be considered deprecated and kept for backward compatibility. New code should use the supported_knn_libraries function.
int HierarchicalSNE_NrOfKnnAlgorithms();
//! Returns both the name/label and index of the supported KNN libraries since this can depend on compiler support.
//! This function is especially useful for building user-interfaces where the user can select which KNN library to use for a specific task (e.g. t-SNE or HSNE).
//! Alternatively it can be used to offer a look-up table to translate the currently set KNN Library index back to human readable text.
std::map<std::string, int> supported_knn_libraries();
//! Returns the name/label and index of distance metrics supported by a specific KNN library.
//! This function is especially useful for building user-interfaces where the user can select both a KNN library and a distance metric since not every KNN library supports the same distance metric.
//! Alternatively it can be used to offer a look-up table to translate the currently set KNN distance metric index back to human readable text.
std::map<std::string, int> supported_knn_library_distance_metrics(int knn_lib);
}
}
namespace hnswlib {
/*
* replacement for the openmp '#pragma omp parallel for' directive
* only handles a subset of functionality (no reductions etc)
* Process ids from start (inclusive) to end (EXCLUSIVE)
*
* The method is borrowed from nmslib https://github.com/nmslib/nmslib/blob/v2.1.1/similarity_search/include/thread_pool.h#L62
* and used in the hnswlib examples as well https://github.com/nmslib/hnswlib/blob/v0.5.0/examples/updates_test.cpp
*/
/*
 * Replacement for the OpenMP '#pragma omp parallel for' directive.
 * Only handles a subset of the functionality (no reductions etc).
 * Processes ids from start (inclusive) to end (EXCLUSIVE), invoking
 * fn(id, threadId) exactly once per id.  numThreads == 0 means "use
 * std::thread::hardware_concurrency() threads".  The first exception
 * thrown by any fn call stops the pool and is rethrown on the calling
 * thread after all workers have joined.
 *
 * The method is borrowed from nmslib https://github.com/nmslib/nmslib/blob/v2.1.1/similarity_search/include/thread_pool.h#L62
 * and used in the hnswlib examples as well https://github.com/nmslib/hnswlib/blob/v0.5.0/examples/updates_test.cpp
 */
template<class Function>
inline void ParallelFor(size_t start, size_t end, size_t numThreads, Function fn) {
    // size_t is unsigned, so the old `numThreads <= 0` really meant == 0
    if (numThreads == 0) {
        numThreads = std::thread::hardware_concurrency();
        // hardware_concurrency() may legitimately return 0; fall back to
        // serial execution rather than spawning zero workers (which would
        // silently process nothing)
        if (numThreads == 0) {
            numThreads = 1;
        }
    }
    if (numThreads == 1) {
        // serial fast path: no threads, no atomics
        for (size_t id = start; id < end; id++) {
            fn(id, 0);
        }
    }
    else {
        std::vector<std::thread> threads;
        std::atomic<size_t> current(start);
        // keep track of exceptions in threads
        // https://stackoverflow.com/a/32428427/1713196
        std::exception_ptr lastException = nullptr;
        std::mutex lastExceptMutex;
        for (size_t threadId = 0; threadId < numThreads; ++threadId) {
            threads.push_back(std::thread([&, threadId] {
                while (true) {
                    size_t id = current.fetch_add(1);
                    if (id >= end) {
                        break;
                    }
                    try {
                        fn(id, threadId);
                    }
                    catch (...) {
                        std::unique_lock<std::mutex> lastExcepLock(lastExceptMutex);
                        lastException = std::current_exception();
                        /*
                         * Stop all workers.  This is safe even when current is
                         * the largest value size_t can hold, because fetch_add
                         * returns the value before the increment (the overflow
                         * wraps to 0 instead of current + 1).
                         */
                        current = end;
                        break;
                    }
                }
            }));
        }
        for (auto &thread : threads) {
            thread.join();
        }
        if (lastException) {
            std::rethrow_exception(lastException);
        }
    }
}
}
#endif // KNN_H
|
distribute_PEs.c | #define N 1000
#define NB_CLUSTERS 4
#define NB_PES 16
#define MIN(x, y) ((x) < (y) ? x : y)
#include <stdio.h>
/* Initialize an array between 2 given lines */
/* Fill rows [begin, end) of the N x N matrix with the pattern 2*row + 3*col. */
void init_array(int a[N][N], int begin, int end) {
  for (int row = begin; row < end; row++) {
    for (int col = 0; col < N; col++) {
      a[row][col] = 2 * row + 3 * col;
    }
  }
}
int main() {
/* NOTE(review): a is N*N = 1,000,000 ints (~4 MB) on the stack -- may
exceed the default stack limit on some systems; confirm, or make it
static. */
int a[N][N];
/* rows assigned to each processing element (integer division) */
int slice = N/(NB_CLUSTERS*NB_PES);
/* Launch enough OpenMP thread to control all the fabric:
Assume that the runtime allows enough threads with nested
parallelism */
#pragma omp parallel for num_threads(NB_CLUSTERS)
for (int cluster = 0; cluster < NB_CLUSTERS; cluster++)
#pragma omp parallel for num_threads(NB_PES)
for (int pe = 0; pe < NB_PES; pe++) {
/* So now the iterations should be distributed with 1
iteration/thread, on NB_CLUSTERS*NB_PES threads.
Distribute the initialization on all the fabric: */
/* linear id of this PE across all clusters */
int global_pe = cluster*NB_PES + pe;
int begin = slice*global_pe;
/* NOTE(review): slice truncates (1000/64 = 15), so rows
slice*NB_CLUSTERS*NB_PES .. N-1 (960..999) are never initialized by
any PE; a[27][42] below is covered, but reading tail rows would be
undefined behavior -- confirm the partitioning is intended. */
int end = MIN(N, slice*(global_pe + 1));
#pragma smecy map(STHORM, cluster, pe)
#pragma smecy arg(a, out, /[begin:end-1][])
init_array(a, begin, end);
}
printf("a[27][42] = %d\n", a[27][42]);
return 0;
}
|
compute_deltat.c | /*
A simple 2D hydro code
(C) Romain Teyssier : CEA/IRFU -- original F90 code
(C) Pierre-Francois Lavallee : IDRIS -- original F90 code
(C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
#include <stdio.h>
#include <string.h>
#include <malloc.h>
// #include <unistd.h>
#include <math.h>
#ifdef HMPP
#undef HMPP
#endif
#include "parametres.h"
#include "compute_deltat.h"
#include "utils.h"
#include "perfcnt.h"
#include "equation_of_state.h"
#define DABS(x) (real_t) fabs((x))
// For `slices` consecutive rows starting at row j, convert the
// conserved state in uold into per-cell values in q and the internal
// energy in e.  The IHV macro linearizes (i, row, variable) indices
// into the flat uold array.  ID/IU/IV/IP are variable indices defined
// elsewhere (presumably density, x/y velocity and pressure/energy --
// see parametres.h; confirm).  Density is clamped below by Hsmallr.
inline void
ComputeQEforRow(const int j,
const real_t Hsmallr,
const int Hnx,
const int Hnxt,
const int Hnyt,
const int Hnxyt,
const int Hnvar,
const int slices, const int Hstep,
real_t * uold,
real_t q[Hnvar][Hstep][Hnxyt], real_t e[Hstep][Hnxyt]
) {
int i, s;
#define IHV(i, j, v) ((i) + Hnxt * ((j) + Hnyt * (v)))
#pragma omp parallel for shared(q, e) private(s, i) COLLAPSE
for (s = 0; s < slices; s++) {
for (i = 0; i < Hnx; i++) {
real_t eken;
real_t tmp;
int idxuID = IHV(i + ExtraLayer, j + s, ID);
int idxuIU = IHV(i + ExtraLayer, j + s, IU);
int idxuIV = IHV(i + ExtraLayer, j + s, IV);
int idxuIP = IHV(i + ExtraLayer, j + s, IP);
q[ID][s][i] = MAX(uold[idxuID], Hsmallr);
q[IU][s][i] = uold[idxuIU] / q[ID][s][i];
q[IV][s][i] = uold[idxuIV] / q[ID][s][i];
// eken: specific kinetic energy; e gets total minus kinetic
eken = half * (Square(q[IU][s][i]) + Square(q[IV][s][i]));
tmp = uold[idxuIP] / q[ID][s][i] - eken;
q[IP][s][i] = tmp;
e[s][i] = tmp;
}
}
{
// performance-counter bookkeeping only
int nops = slices * Hnx;
FLOPS(5 * nops, 3 * nops, 1 * nops, 0 * nops);
}
#undef IHV
#undef IHVW
}
// to force a parallel reduction with OpenMP
#define WOMP
// Update *cournox / *cournoy with the maximum signal speed
// (sound speed c plus |velocity|) over the given slices, along x (IU)
// and y (IV).  The WOMP branch performs the same computation as the
// scalar branch but with an OpenMP max-reduction.
// NOTE(review): tmpm1/tmpm2 are accepted (and listed in the pragma's
// shared clause) but never used -- confirm they are leftovers from an
// older reduction scheme.
inline void
courantOnXY(real_t *cournox,
real_t *cournoy,
const int Hnx,
const int Hnxyt,
const int Hnvar, const int slices, const int Hstep, real_t c[Hstep][Hnxyt], real_t q[Hnvar][Hstep][Hnxyt],
real_t *tmpm1,
real_t *tmpm2
)
{
#ifdef WOMP
int s, i;
// real_t maxValC = zero;
// seed the reductions with the caller's running maxima
real_t tmp1 = *cournox, tmp2 = *cournoy;
#pragma omp parallel for shared(tmpm1, tmpm2) private(s,i) reduction(max:tmp1) reduction(max:tmp2)
for (s = 0; s < slices; s++) {
for (i = 0; i < Hnx; i++) {
tmp1 = MAX(tmp1, c[s][i] + DABS(q[IU][s][i]));
tmp2 = MAX(tmp2, c[s][i] + DABS(q[IV][s][i]));
}
}
*cournox = tmp1;
*cournoy = tmp2;
{
// performance-counter bookkeeping only
int nops = (slices) * Hnx;
FLOPS(2 * nops, 0 * nops, 2 * nops, 0 * nops);
}
#else
int i, s;
real_t tmp1, tmp2;
for (s = 0; s < slices; s++) {
for (i = 0; i < Hnx; i++) {
tmp1 = c[s][i] + DABS(q[IU][s][i]);
tmp2 = c[s][i] + DABS(q[IV][s][i]);
*cournox = MAX(*cournox, tmp1);
*cournoy = MAX(*cournoy, tmp2);
}
}
{
// performance-counter bookkeeping only
int nops = (slices) * Hnx;
FLOPS(2 * nops, 0 * nops, 5 * nops, 0 * nops);
}
#endif
#undef IHVW
}
// Allocate the scratch buffers used by compute_deltat: q (primitive
// variables), e (internal energy), c (sound speed) and two per-slice
// temporaries.  Sizes mirror compute_deltat_clean_mem below.
void compute_deltat_init_mem(const hydroparam_t H, hydrowork_t * Hw, hydrovarwork_t * Hvw)
{
Hvw->q = (real_t (*)) DMalloc(H.nvar * H.nxyt * H.nxystep);
Hw->e = (real_t (*)) DMalloc( H.nxyt * H.nxystep);
Hw->c = (real_t (*)) DMalloc( H.nxyt * H.nxystep);
Hw->tmpm1 = (real_t *) DMalloc(H.nxystep);
Hw->tmpm2 = (real_t *) DMalloc(H.nxystep);
}
// Release the scratch buffers allocated by compute_deltat_init_mem
// (sizes passed so DFree can account for them).
void compute_deltat_clean_mem(const hydroparam_t H, hydrowork_t * Hw, hydrovarwork_t * Hvw)
{
DFree(&Hvw->q, H.nvar * H.nxyt * H.nxystep);
DFree(&Hw->e, H.nxyt * H.nxystep);
DFree(&Hw->c, H.nxyt * H.nxystep);
DFree(&Hw->tmpm1, H.nxystep);
DFree(&Hw->tmpm2, H.nxystep);
}
// Compute the next CFL-limited time step *dt: sweep the grid interior
// in bands of at most H.nxystep rows, deriving primitive variables
// (ComputeQEforRow), sound speed (equation_of_state) and the maximum
// signal speeds (courantOnXY), then dt = courant_factor * dx / max speed.
// NOTE(review): the VLA casts below disagree with the declarations --
// e is declared (*)[H.nxyt] but cast to (*)[H.nxystep], and c is
// declared (*)[H.nxystep] while the callees index it as
// [Hstep][Hnxyt].  This only works if the row strides happen to
// coincide; confirm nxyt vs nxystep here.
void
compute_deltat(real_t *dt, const hydroparam_t H, hydrowork_t * Hw, hydrovar_t * Hv, hydrovarwork_t * Hvw) {
real_t cournox, cournoy;
int j, jend, slices, Hstep, Hmin, Hmax;
real_t (*e)[H.nxyt];
real_t (*c)[H.nxystep];
real_t (*q)[H.nxystep][H.nxyt];
WHERE("compute_deltat");
// compute time step on grid interior
cournox = zero;
cournoy = zero;
c = (real_t (*)[H.nxystep]) Hw->c;
e = (real_t (*)[H.nxystep]) Hw->e;
q = (real_t (*)[H.nxystep][H.nxyt]) Hvw->q;
Hstep = H.nxystep;
Hmin = H.jmin + ExtraLayer;
Hmax = H.jmax - ExtraLayer;
for (j = Hmin; j < Hmax; j += Hstep) {
jend = j + Hstep;
if (jend >= Hmax)
jend = Hmax;
slices = jend - j; // number of slices to compute
ComputeQEforRow(j, H.smallr, H.nx, H.nxt, H.nyt, H.nxyt, H.nvar, slices, Hstep, Hv->uold, q, e);
equation_of_state(0, H.nx, H.nxyt, H.nvar, H.smallc, H.gamma, slices, Hstep, e, q, c);
courantOnXY(&cournox, &cournoy, H.nx, H.nxyt, H.nvar, slices, Hstep, c, q, Hw->tmpm1, Hw->tmpm2);
// fprintf(stdout, "[%2d]\t%g %g %g %g\n", H.mype, cournox, cournoy, H.smallc, H.courant_factor);
}
// smallc bounds the denominator away from zero
*dt = H.courant_factor * H.dx / MAX(cournox, MAX(cournoy, H.smallc));
FLOPS(1, 1, 2, 0);
// fprintf(stdout, "[%2d]\t%g %g %g %g %g %g\n", H.mype, cournox, cournoy, H.smallc, H.courant_factor, H.dx, *dt);
} // compute_deltat
|
ellipticBlockUpdatePCG.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Fused PCG vector update for a block (multi-field) system:
//   x += alpha * p
//   r -= alpha * Ap
//   rdotr = sum over all fields/points of r[n]^2 * invDegree[i]
// The weighted local dot product is written to cpu_rdotr[0].  Field
// fld of each vector starts at fld * offset; p_Nfields is a
// compile-time constant supplied by the build system.
extern "C" void FUNC(ellipticBlockUpdatePCG)(const dlong & N,
                                             const dlong & offset,
                                             const dfloat* __restrict__ cpu_invDegree,
                                             const dfloat* __restrict__ cpu_p,
                                             const dfloat* __restrict__ cpu_Ap,
                                             const dfloat & alpha,
                                             dfloat* __restrict__ cpu_x,
                                             dfloat* __restrict__ cpu_r,
                                             dfloat* __restrict__ cpu_rdotr)
{
  dfloat rdotr = 0;
#ifdef __NEKRS__OMP__
  // BUG FIX: the reduction clause was missing, so every thread raced on
  // the shared rdotr accumulator and the result was nondeterministic.
  #pragma omp parallel for collapse(2) reduction(+:rdotr)
#endif
  for(int fld = 0; fld < p_Nfields; fld++)
    for(int i = 0; i < N; ++i) {
      const dlong n = i + fld * offset;
      cpu_x[n] += alpha * cpu_p[n];
      const dfloat rn = cpu_r[n] - alpha * cpu_Ap[n];
      rdotr += rn * rn * cpu_invDegree[i];
      cpu_r[n] = rn;
    }
  cpu_rdotr[0] = rdotr;
}
|
GB_binop__lxor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale): GB (_AxD__lxor_uint32)
// D*A function (rowscale): GB (_DxB__lxor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B GB (_bind1st__lxor_uint32)
// C=scalar+B' GB (_bind1st_tran__lxor_uint32)
// C=A+scalar GB (_bind2nd__lxor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with cij = ((aij!=0) != (bij!=0)).
// The loop body lives in the included template; this wrapper only supplies the
// type/operator macros defined above. Returns GrB_NO_VALUE when the kernel is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numerical work is done by the shared template, parameterized by the macros
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; accumulation uses the LXOR binop.
// B_ek_slicing / B_ntasks / B_nthreads describe the precomputed parallel
// partition of B's entries (produced by the ek_slice machinery).
GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// subassign method 23: C += B, C dense, via the shared template
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via p_bwork).
GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
// subassign method 22: C += scalar, C dense, via the shared template
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above always returns first.
// Harmless generator artifact; kept byte-identical since this file is generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
// A_ek_slicing / A_ntasks / A_nthreads give the parallel partition of A.
GrB_Info GB (_AxD__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are computed here
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its values are computed here
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns). The C_to_M/A/B
// arrays map C's vectors to those of M, A and B; TaskList/C_ntasks/C_nthreads
// carry the precomputed parallel schedule from the symbolic phase.
GrB_Info GB (_AaddB__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces; allocated inside the template as needed
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
// releases the workspaces declared above
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B (set intersection of patterns),
// general case dispatched via the meta file.
GrB_Info GB (_AemultB_01__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. The flipxy handling is resolved at compile time via
// GB_BINOP_FLIP (0 for LXOR, which is commutative).
GrB_Info GB (_AemultB_02__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; work is partitioned over M's entries (M_ek_slicing).
GrB_Info GB (_AemultB_03__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form:
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B, selected by ewise_method / Mask_comp.
GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for all entries p: apply the LXOR binop with the
// scalar bound to the first argument. Bb is B's bitmap (NULL if B is full);
// entries absent from the bitmap are skipped.
GrB_Info GB (_bind1st__lxor_uint32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arguments
uint32_t *Cz = (uint32_t *) Cx_output ;
uint32_t xval = (*((uint32_t *) x_input)) ;
uint32_t *Bvals = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only entries present in B's bitmap contribute
if (GBB (Bb, p))
{
uint32_t bij = GBX (Bvals, p, false) ;
Cz [p] = ((xval != 0) != (bij != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for all entries p: apply the LXOR binop with the
// scalar bound to the second argument. Ab is A's bitmap (NULL if A is full);
// entries absent from the bitmap are skipped.
GrB_Info GB (_bind2nd__lxor_uint32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arguments
uint32_t *Cz = (uint32_t *) Cx_output ;
uint32_t *Avals = (uint32_t *) Ax_input ;
uint32_t yval = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in A's bitmap contribute
if (GBB (Ab, p))
{
uint32_t aij = GBX (Avals, p, false) ;
Cz [p] = ((aij != 0) != (yval != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying the binop with the scalar bound
// to the first argument. The per-entry work is GB_CAST_OP defined just above.
GrB_Info GB (_bind1st_tran__lxor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A while applying the binop with the scalar bound
// to the second argument. The per-entry work is GB_CAST_OP defined just above.
GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_elimination_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "utilities/builtin_timer.h"
#include "utilities/atomic_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard elimination builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
/// Definition of the classes from the base class
typedef typename BaseType::SizeType SizeType;
typedef typename BaseType::IndexType IndexType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
/// Definition of the equation id vector
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
/// Node definition
typedef Node<3> NodeType;
/// Containers definition
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedEliminationBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedEliminationBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Validate the user-provided settings against the defaults, then apply them.
    // (ThisParameters is taken by value, so no caller-visible state changes.)
    this->AssignSettings(this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()));
}
/**
* @brief Constructor.
*/
explicit ResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
// Settings keep their base-class defaults; only the linear solver is set.
}
/** Destructor.
*/
// Defaulted destructor: this class owns no resources beyond the base class.
~ResidualBasedEliminationBuilderAndSolver() override = default;
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    // Factory method: construct a fresh instance of this builder-and-solver
    // configured with the given linear solver and parameters.
    auto p_new_builder_and_solver = Kratos::make_shared<ClassType>(pNewLinearSystemSolver, ThisParameters);
    return p_new_builder_and_solver;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rb
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
// Getting the elements from the model
const int nelements = static_cast<int>(r_elements_array.size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(r_conditions_array.size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = r_elements_array.begin();
const auto it_cond_begin = r_conditions_array.begin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
// Assemble all elements
const auto timer = BuiltinTimer();
// firstprivate gives each thread its own scratch copies of the local
// contribution matrices/vector, avoiding races on the buffers.
// NOTE(review): Assemble is assumed thread-safe (atomics, or locks when
// USE_LOCKS_IN_ASSEMBLY is defined) — confirm in its implementation.
#pragma omp parallel firstprivate(LHS_Contribution, RHS_Contribution, equation_id )
{
// nowait: threads finishing elements may start on conditions immediately
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateSystemContributions(*it_elem, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate elemental contribution
pScheme->CalculateSystemContributions(*it_cond, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
}
}
}
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System build time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
*/
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    // Getting the elements from the model
    ElementsArrayType& r_elements_array = rModelPart.Elements();
    // Getting the array of the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
    const int nelements = static_cast<int>(r_elements_array.size());
    const int nconditions = static_cast<int>(r_conditions_array.size());
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = r_elements_array.begin();
    const auto it_cond_begin = r_conditions_array.begin();
    // Resetting to zero the vector of reactions.
    // FIX: guard with the reactions flag (as BuildRHS does) — the reactions
    // vector is only guaranteed to be allocated when reactions are requested,
    // so dereferencing it unconditionally can crash.
    if (BaseType::mCalculateReactionsFlag) {
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
    }
    // Contributions to the system (per-thread scratch via firstprivate below)
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    // Vector containing the localization in the system of the different terms
    EquationIdVectorType equation_id;
    #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
    {
        // nowait: threads may proceed to the conditions loop without a barrier
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);
            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);
                // Assemble the elemental contribution
                AssembleLHS(rA, LHS_Contribution, equation_id);
            }
        }
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);
            if (condition_is_active) {
                // Calculate condition contribution
                pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);
                // Assemble the condition contribution
                AssembleLHS(rA, LHS_Contribution, equation_id);
            }
        }
    }
    KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    // Getting the elements from the model
    ElementsArrayType& r_elements_array = rModelPart.Elements();
    // Getting the array of the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
    const int nelements = static_cast<int>(r_elements_array.size());
    const int nconditions = static_cast<int>(r_conditions_array.size());
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = r_elements_array.begin();
    const auto it_cond_begin = r_conditions_array.begin();
    // Resetting to zero the vector of reactions.
    // FIX: guard with the reactions flag (as BuildRHS does) — the reactions
    // vector is only guaranteed to be allocated when reactions are requested,
    // so dereferencing it unconditionally can crash.
    if (BaseType::mCalculateReactionsFlag) {
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
    }
    // Contributions to the system (per-thread scratch via firstprivate below)
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    // Vector containing the localization in the system of the different terms
    EquationIdVectorType equation_id;
    #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
    {
        // nowait: threads may proceed to the conditions loop without a barrier
        #pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);
            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);
                // Assemble rows corresponding to free dofs only (all columns kept)
                AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
            }
        }
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);
            if (condition_is_active) {
                // Calculate condition contribution
                pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);
                // Assemble rows corresponding to free dofs only (all columns kept)
                AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
            }
        }
    }
    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void SystemSolve(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    // Norm of the RHS; an empty vector counts as zero
    const double norm_b = (TSparseSpace::Size(rb) != 0) ? TSparseSpace::TwoNorm(rb) : 0.0;
    if (norm_b != 0.0) {
        // Non-trivial residual: delegate to the configured linear solver
        BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
    } else {
        // Zero RHS implies a zero update
        TSparseSpace::SetToZero(rDx);
    }
    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY
    // Norm of the RHS; an empty vector counts as zero
    const double norm_b = (TSparseSpace::Size(rb) != 0) ? TSparseSpace::TwoNorm(rb) : 0.0;
    if (norm_b != 0.0) {
        // Some solvers (e.g. AMG with near-null-space) need extra problem data
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, BaseType::mDofSet, rModelPart);
        // Delegate to the configured linear solver
        BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
    } else {
        // Zero RHS implies a zero update; warn once on the root rank
        TSparseSpace::SetToZero(rDx);
        KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }
    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    // Assemble the system matrix and residual
    Timer::Start("Build");
    Build(pScheme, rModelPart, rA, rb);
    Timer::Stop("Build");
    // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
    ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    // Solve the assembled system, timing the solution phase separately
    const auto solve_timer = BuiltinTimer();
    Timer::Start("Solve");
    SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
    Timer::Stop("Solve");
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << solve_timer.ElapsedSeconds() << std::endl;
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Reassemble only the residual (rA is assumed already built), then solve
BuildRHS(pScheme, rModelPart, rb);
SystemSolve(rA, rDx, rb);
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Resetting to zero the vector of reactions
if(BaseType::mCalculateReactionsFlag) {
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
}
// Getting the Elements
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Contributions to the system
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
// Assemble all elements
// firstprivate gives each thread its own scratch RHS buffer and id vector.
// NOTE(review): AssembleRHS is assumed thread-safe — confirm in its implementation.
#pragma omp parallel firstprivate( RHS_Contribution, equation_id)
{
const auto it_elem_begin = r_elements_array.begin();
const int nelements = static_cast<int>(r_elements_array.size());
// nowait: threads finishing elements may start on conditions immediately
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < nelements; ++i) {
auto it_elem = it_elem_begin + i;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental Right Hand Side Contribution
pScheme->CalculateRHSContribution(*it_elem, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleRHS(rb, RHS_Contribution, equation_id);
}
}
// Assemble all conditions
const auto it_cond_begin = r_conditions_array.begin();
const int nconditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < nconditions; ++i) {
auto it_cond = it_cond_begin + i;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate elemental contribution
pScheme->CalculateRHSContribution(*it_cond, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleRHS(rb, RHS_Contribution, equation_id);
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

    // Gets the array of elements from the modeler
    ElementsArrayType& r_elements_array = rModelPart.Elements();
    const int nelements = static_cast<int>(r_elements_array.size());

    DofsVectorType elemental_dof_list;

    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // One unordered set of DoF pointers per thread; each thread collects the
    // DoFs of the entities it visits, and the per-thread sets are merged later.
    SizeType nthreads = ParallelUtilities::GetNumThreads();

    typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;

    std::vector<set_type> dofs_aux_list(nthreads);
    for (int i = 0; i < static_cast<int>(nthreads); ++i) {
        dofs_aux_list[i].reserve(nelements);
    }

    // Collect the DoFs of every element. elemental_dof_list is the TLS
    // prototype copied per thread by for_each.
    // NOTE(review): indexing dofs_aux_list by OpenMPUtils::ThisThread() assumes
    // IndexPartition dispatches work on the same OpenMP thread pool — verify.
    IndexPartition<std::size_t>(nelements).for_each(elemental_dof_list, [&](std::size_t Index, DofsVectorType& tls_elemental_dof_list){
        auto it_elem = r_elements_array.begin() + Index;
        const IndexType this_thread_id = OpenMPUtils::ThisThread();

        // Gets list of Dof involved on every element
        pScheme->GetDofList(*it_elem, tls_elemental_dof_list, r_current_process_info);
        dofs_aux_list[this_thread_id].insert(tls_elemental_dof_list.begin(), tls_elemental_dof_list.end());
    });

    // Same collection pass over the conditions
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
    const int nconditions = static_cast<int>(r_conditions_array.size());
    IndexPartition<std::size_t>(nconditions).for_each(elemental_dof_list, [&](std::size_t Index, DofsVectorType& tls_elemental_dof_list){
        auto it_cond = r_conditions_array.begin() + Index;
        const IndexType this_thread_id = OpenMPUtils::ThisThread();

        // Gets list of Dof involved on every condition
        pScheme->GetDofList(*it_cond, tls_elemental_dof_list, r_current_process_info);
        dofs_aux_list[this_thread_id].insert(tls_elemental_dof_list.begin(), tls_elemental_dof_list.end());
    });

    // Here we do a reduction in a tree so to have everything on thread 0:
    // in each round, set [i + new_max] is folded into set [i].
    SizeType old_max = nthreads;
    SizeType new_max = ceil(0.5*static_cast<double>(old_max));
    while (new_max >= 1 && new_max != old_max) {
        IndexPartition<std::size_t>(new_max).for_each([&](std::size_t Index){
            if (Index + new_max < old_max) {
                dofs_aux_list[Index].insert(dofs_aux_list[Index + new_max].begin(), dofs_aux_list[Index + new_max].end());
                dofs_aux_list[Index + new_max].clear();
            }
        });

        old_max = new_max;
        new_max = ceil(0.5*static_cast<double>(old_max));
    }

    // Copy the merged (unique) DoFs into the member set and sort it
    DofsArrayType dof_temp;
    BaseType::mDofSet = DofsArrayType();

    dof_temp.reserve(dofs_aux_list[0].size());
    for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); ++it) {
        dof_temp.push_back(*it);
    }
    dof_temp.Sort();

    BaseType::mDofSet = dof_temp;

    // Throws an exception if there are no Degrees of freedom involved in the analysis
    KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

    BaseType::mDofSetIsInitialized = true;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
    // Rebuild one OpenMP lock per DoF (destroy stale locks first)
    if (mLockArray.size() != 0) {
        for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
            omp_destroy_lock(&mLockArray[i]);
    }

    mLockArray.resize(BaseType::mDofSet.size());

    for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
        omp_init_lock(&mLockArray[i]);
#endif

    // If reactions are to be calculated, we check if all the dofs have reactions defined
    // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
    if(BaseType::GetCalculateReactionsFlag()) {
        for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
            KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                << "Node : " << dof_iterator->Id() << std::endl
                << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl;
        }
    }
#endif

    KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(ModelPart& rModelPart) override
{
    // Number the degrees of freedom: free DoFs get ascending equation ids
    // from the front, fixed DoFs get descending ids from the back. Hence an
    // EquationId >= mEquationSystemSize identifies a restrained DoF.
    int next_free_id = 0;
    int next_fix_id = BaseType::mDofSet.size();

    for (auto& r_dof : BaseType::mDofSet) {
        if (r_dof.IsFixed()) {
            r_dof.SetEquationId(--next_fix_id);
        } else {
            r_dof.SetEquationId(next_free_id++);
        }
    }

    // Everything below this index belongs to the solvable system
    BaseType::mEquationSystemSize = next_fix_id;
}
/**
* @brief This method resize and initializes the system of euqations
* @param pA The pointer to the LHS matrix
* @param pDx The pointer to the vector of Unknowns
* @param pb The pointer to the RHS vector
* @param rModelPart The model part to be computed
*/
/**
 * @brief Resizes and initializes the system matrix, solution and RHS vectors.
 * @param pScheme The integration scheme considered
 * @param pA The pointer to the LHS matrix
 * @param pDx The pointer to the vector of unknowns
 * @param pb The pointer to the RHS vector
 * @param rModelPart The model part to be computed
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY

    // Lazily create the containers the first time we are called
    if (pA == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) { // If the pointer is not initialized initialize it to an empty vector
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) { // If the pointer is not initialized initialize it to an empty vector
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == nullptr) { // If the pointer is not initialized initialize it to an empty vector
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }

    TSystemMatrixType& rA = *pA;
    TSystemVectorType& rDx = *pDx;
    TSystemVectorType& rb = *pb;

    // Resizing the system vectors and matrix
    if (rA.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized
        rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, rA, rModelPart);
    } else if (rA.size1() != BaseType::mEquationSystemSize || rA.size2() != BaseType::mEquationSystemSize) {
        // FIX: KRATOS_ERROR throws, so the resize/ConstructMatrixStructure
        // calls that used to follow it were unreachable dead code (removed).
        KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
    }

    if (rDx.size() != BaseType::mEquationSystemSize) {
        rDx.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rDx);

    if (rb.size() != BaseType::mEquationSystemSize) {
        rb.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rb);

    // If needed resize the vector for the calculation of reactions
    // (it stores one entry per fixed DoF)
    if (BaseType::mCalculateReactionsFlag == true) {
        const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
        if (BaseType::mpReactionsVector->size() != reactions_vector_size)
            BaseType::mpReactionsVector->resize(reactions_vector_size, false);
    }

    KRATOS_CATCH("")
}
/**
* @brief This method computes the reactions
* @param pScheme The integration scheme considered
* @param rModelPart The model part considered
* @param rA The LHS of the system
* @param rDx The vector of Unknowns
* @param rb The RHS vector
*/
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    // Refresh the RHS so that it reflects the converged state
    BuildRHS(pScheme, rModelPart, rb);

    // Fixed DoFs carry ids >= mEquationSystemSize; their residual lives in
    // the reactions vector, offset by the system size. The reaction is the
    // negated residual.
    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
    for (auto it_dof = BaseType::mDofSet.ptr_begin(); it_dof != BaseType::mDofSet.ptr_end(); ++it_dof) {
        const std::size_t eq_id = (*it_dof)->EquationId();
        if (eq_id >= BaseType::mEquationSystemSize) {
            (*it_dof)->GetSolutionStepReactionValue() = -r_reactions_vector[eq_id - BaseType::mEquationSystemSize];
        }
    }
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    // Intentionally empty: with the elimination approach the fixed DoFs are
    // never assembled into the system (see Assemble/AssembleRHS, which skip
    // ids >= mEquationSystemSize), so there is nothing to impose here.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
this->mDofSet = DofsArrayType();
this->mpReactionsVector.reset();
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    // No builder-specific validations are implemented; 0 means "all ok".
    return 0;
    KRATOS_CATCH("");
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
    // Defaults specific to this builder and solver
    Parameters default_parameters = Parameters(R"(
{
"name" : "elimination_builder_and_solver"
})");

    // Complete them with the defaults declared by the base class
    default_parameters.RecursivelyAddMissingParameters(BaseType::GetDefaultParameters());
    return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
    // Identifier used to select this builder in the settings (snake_case)
    return "elimination_builder_and_solver";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Class name, used by PrintInfo/PrintData below
    return "ResidualBasedEliminationBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    // Short, human-readable identification (same text as Info())
    rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    // No extra data to report beyond the class name
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef USE_LOCKS_IN_ASSEMBLY
std::vector<omp_lock_t> mLockArray;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assembles the system
* @param rA The LHS of the system
* @param rb The RHS of the system
* @param rLHSContribution The LHS local contribution
* @param rRHSContribution The RHS local contribution
* @param rEquationId The equation id
* @param rLockArray The lock of the dof
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
*/
void Assemble(
    TSystemMatrixType& rA,
    TSystemVectorType& rb,
    const LocalSystemMatrixType& rLHSContribution,
    const LocalSystemVectorType& rRHSContribution,
    const Element::EquationIdVectorType& rEquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
    ,std::vector< omp_lock_t >& rLockArray
#endif
    )
{
    const SizeType local_size = rLHSContribution.size1();

    for (IndexType i_local = 0; i_local < local_size; ++i_local) {
        const IndexType i_global = rEquationId[i_local];

        // Only free DoFs (id < mEquationSystemSize) are assembled; the
        // elimination approach simply drops the fixed rows/columns.
        if (i_global < BaseType::mEquationSystemSize) {
#ifdef USE_LOCKS_IN_ASSEMBLY
            // Per-row lock protects both the RHS entry and the matrix row
            omp_set_lock(&rLockArray[i_global]);
            rb[i_global] += rRHSContribution(i_local);
#else
            // Lock-free path: atomic add on the RHS entry
            double& r_a = rb[i_global];
            const double& v_a = rRHSContribution(i_local);
            AtomicAdd(r_a, v_a);
#endif
            AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId);

#ifdef USE_LOCKS_IN_ASSEMBLY
            omp_unset_lock(&rLockArray[i_global]);
#endif
        }
        //note that computation of reactions is not performed here!
    }
}
/**
* @brief This method construcs the relationship between the DoF
* @param pScheme The integration scheme
* @param rA The LHS of the system
* @param rModelPart The model part which defines the problem
*/
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& rA,
    ModelPart& rModelPart
    )
{
    // Filling with zero the matrix (creating the structure): first collect,
    // per row, the set of coupled column ids; then build the CSR arrays.
    Timer::Start("MatrixStructure");

    const SizeType equation_size = BaseType::mEquationSystemSize;

    std::vector<std::unordered_set<IndexType> > indices(equation_size);

    block_for_each(indices, [](std::unordered_set<IndexType>& rIndices){
        rIndices.reserve(40);
    });

    // Scratch id vector; initial size 3 is just a guess — presumably
    // pScheme->EquationId resizes it per entity (TODO confirm).
    Element::EquationIdVectorType ids(3, 0);

    #pragma omp parallel firstprivate(ids)
    {
        // The process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // We repeat the same declaration for each thread to avoid locking
        // while inserting; the per-thread sets are merged under a critical
        // section at the end.
        std::vector<std::unordered_set<IndexType> > temp_indexes(equation_size);

        #pragma omp for
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            temp_indexes[index].reserve(30);

        // Getting the size of the array of elements from the model
        const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

        // Element initial iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        // We iterate over the elements: every pair of free DoF ids in the
        // same entity couples, so each id_j goes into row id_i's set.
        #pragma omp for schedule(guided, 512) nowait
        for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
            auto it_elem = it_elem_begin + i_elem;
            pScheme->EquationId( *it_elem, ids, r_current_process_info);

            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Getting the size of the array of the conditions
        const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

        // Condition initial iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        // We iterate over the conditions (same coupling rule as elements)
        #pragma omp for schedule(guided, 512) nowait
        for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
            auto it_cond = it_cond_begin + i_cond;
            pScheme->EquationId( *it_cond, ids, r_current_process_info);
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Merging all the temporal indexes (one thread at a time)
        #pragma omp critical
        {
            for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
            }
        }
    }

    // Count the row sizes (total number of non-zeros)
    SizeType nnz = 0;
    for (IndexType i = 0; i < indices.size(); ++i)
        nnz += indices[i].size();

    rA = TSystemMatrixType(indices.size(), indices.size(), nnz);

    double* Avalues = rA.value_data().begin();
    std::size_t* Arow_indices = rA.index1_data().begin();
    std::size_t* Acol_indices = rA.index2_data().begin();

    // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    // (each entry is a prefix sum of the previous one)
    Arow_indices[0] = 0;
    for (IndexType i = 0; i < rA.size1(); ++i)
        Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

    // Fill the column indices of each row (zero-valued) and sort them,
    // as required by the CSR format
    IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t Index){
        const IndexType row_begin = Arow_indices[Index];
        const IndexType row_end = Arow_indices[Index + 1];
        IndexType k = row_begin;
        for (auto it = indices[Index].begin(); it != indices[Index].end(); ++it) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            ++k;
        }

        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    });

    rA.set_filled(indices.size() + 1, nnz);

    Timer::Stop("MatrixStructure");
}
/**
* @brief This method assembles the LHS of the system
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHS(
    TSystemMatrixType& rA,
    LocalSystemMatrixType& rLHSContribution,
    EquationIdVectorType& rEquationId
    )
{
    // Add the local matrix into the global one; fixed DoFs
    // (ids >= mEquationSystemSize) are skipped entirely.
    const SizeType system_size = BaseType::mEquationSystemSize;
    const SizeType local_size = rLHSContribution.size1();

    for (IndexType i = 0; i < local_size; ++i) {
        const IndexType row = rEquationId[i];
        if (row >= system_size)
            continue; // restrained row: nothing to assemble

        for (IndexType j = 0; j < local_size; ++j) {
            const IndexType col = rEquationId[j];
            if (col < system_size) {
                rA(row, col) += rLHSContribution(i, j);
            }
        }
    }
}
/**
* @brief This function is equivalent to the AssembleRowContribution of the block builder and solver
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped
*/
inline void AssembleRowContributionFreeDofs(
    TSystemMatrixType& rA,
    const Matrix& rALocal,
    const IndexType i,
    const IndexType i_local,
    const Element::EquationIdVectorType& EquationId
    )
{
    // Direct access to the CSR arrays of the global matrix
    double* values_vector = rA.value_data().begin();
    IndexType* index1_vector = rA.index1_data().begin();
    IndexType* index2_vector = rA.index2_data().begin();

    // First CSR position of row i
    const IndexType left_limit = index1_vector[i];

    // Find the first entry
    // We iterate over the equation ids until we find the first equation id to be considered
    // We count in which component we find an ID
    IndexType last_pos = 0;
    IndexType last_found = 0;
    IndexType counter = 0;
    for(IndexType j=0; j < EquationId.size(); ++j) {
        ++counter;
        const IndexType j_global = EquationId[j];
        if (j_global < BaseType::mEquationSystemSize) {
            // locate the CSR slot for column j_global in row i
            last_pos = ForwardFind(j_global,left_limit,index2_vector);
            last_found = j_global;
            break;
        }
    }

    // After the loop, counter is the 1-based position of the first free dof
    // in EquationId (the caller guarantees row i itself is free, so one
    // always exists and counter <= EquationId.size()).
    if (counter <= EquationId.size()) {
#ifndef USE_LOCKS_IN_ASSEMBLY
        double& r_a = values_vector[last_pos];
        const double& v_a = rALocal(i_local,counter - 1);
        AtomicAdd(r_a, v_a);
#else
        // Under USE_LOCKS_IN_ASSEMBLY the caller already holds the row lock
        values_vector[last_pos] += rALocal(i_local,counter - 1);
#endif

        // Now find all of the other entries. Since the CSR columns of a row
        // are sorted, we search forward or backward from the last hit
        // instead of rescanning the whole row.
        IndexType pos = 0;
        for(IndexType j = counter; j < EquationId.size(); ++j) {
            IndexType id_to_find = EquationId[j];
            if (id_to_find < BaseType::mEquationSystemSize) {
                if(id_to_find > last_found)
                    pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
                else if(id_to_find < last_found)
                    pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
                else
                    pos = last_pos;

#ifndef USE_LOCKS_IN_ASSEMBLY
                double& r = values_vector[pos];
                const double& v = rALocal(i_local,j);
                AtomicAdd(r, v);
#else
                values_vector[pos] += rALocal(i_local,j);
#endif
                last_found = id_to_find;
                last_pos = pos;
            }
        }
    }
}
inline IndexType ForwardFind(const IndexType id_to_find,
                             const IndexType start,
                             const IndexType* index_vector)
{
    // Scan towards higher positions until id_to_find is hit.
    // The id is assumed to exist in the row (no bounds check).
    IndexType pos = start;
    for (; index_vector[pos] != id_to_find; ++pos) {}
    return pos;
}
inline IndexType BackwardFind(const IndexType id_to_find,
                              const IndexType start,
                              const IndexType* index_vector)
{
    // Scan towards lower positions until id_to_find is hit.
    // The id is assumed to exist in the row (no bounds check).
    IndexType pos = start;
    for (; index_vector[pos] != id_to_find; --pos) {}
    return pos;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method ensures that the contribution is unique
*/
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    // Append candidate only if it is not already present (linear scan).
    for (const auto& entry : v) {
        if (entry == candidate)
            return; // already stored
    }
    v.push_back(candidate);
}
/**
* @brief This method assembles the RHS of the system
* @param rb The RHS to assemble
* @param rRHSContribution The local RHS contribution
* @param rEquationId The equation id
*/
void AssembleRHS(
    TSystemVectorType& rb,
    const LocalSystemVectorType& rRHSContribution,
    const EquationIdVectorType& rEquationId
    )
{
    const SizeType local_size = rRHSContribution.size();

    if (BaseType::mCalculateReactionsFlag == false) {
        // Only free DoFs contribute; fixed ones are simply ignored
        for (IndexType i = 0; i < local_size; ++i) {
            const IndexType global_i = rEquationId[i];
            if (global_i < BaseType::mEquationSystemSize) { // Free dof
                double& b_value = rb[global_i];
                const double& rhs_value = rRHSContribution[i];
                AtomicAdd(b_value, rhs_value);
            }
        }
    } else {
        // Free DoFs go into the system vector, fixed DoFs accumulate
        // their residual into the reactions vector instead
        TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
        for (IndexType i = 0; i < local_size; ++i) {
            const IndexType global_i = rEquationId[i];
            if (global_i < BaseType::mEquationSystemSize) { // Free dof
                double& b_value = rb[global_i];
                const double& rhs_value = rRHSContribution[i];
                AtomicAdd(b_value, rhs_value);
            } else { // Fixed dof
                double& b_value = r_reactions_vector[global_i - BaseType::mEquationSystemSize];
                const double& rhs_value = rRHSContribution[i];
                AtomicAdd(b_value, rhs_value);
            }
        }
    }
}
/**
* @brief This method assembles the LHS of the system (on free rows)
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHSCompleteOnFreeRows(
    TSystemMatrixType& rA,
    LocalSystemMatrixType& rLHSContribution,
    EquationIdVectorType& rEquationId
    )
{
    // Assemble complete rows (all columns, free or fixed) but only for
    // rows belonging to free DoFs.
    const SizeType local_size = rLHSContribution.size1();

    for (IndexType i = 0; i < local_size; ++i) {
        const IndexType row = rEquationId[i];
        if (row >= BaseType::mEquationSystemSize)
            continue; // restrained row: skip

        for (IndexType j = 0; j < local_size; ++j) {
            rA(row, rEquationId[j]) += rLHSContribution(i, j);
        }
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
supervised.c | /*
Copyright (C) <2009-2011> <Alexandre Xavier Falcão and João Paulo Papa>
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
please see full copyright in COPYING file.
-------------------------------------------------------------------------
written by A.X. Falcão <afalcao@ic.unicamp.br> and by J.P. Papa
<papa.joaopaulo@gmail.com>, Oct 20th 2008
This program is a collection of functions to manage the Optimum-Path Forest (OPF)
classifier.*/
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include "common.h"
#include "set.h"
#include "graph.h"
#include "realheap.h"
#include "metrics.h"
#include "measures.h"
#include "knn.h"
#include "supervised.h"
#include <omp.h>
/* Find prototypes by the MST approach: build a Minimum Spanning Tree over the
   training graph (Prim's algorithm) and mark as prototypes both endpoints of
   every MST edge that connects nodes of different true labels. */
static void
mst_prototypes (struct opf_graph * sg)
{
  int p, q;
  double weight;
  struct real_heap *Q = NULL;
  double *path_val = NULL;
  int pred;
  int nproto;

  // initialization
  path_val = alloc_double (sg->node_n);
  Q = real_heap_create (sg->node_n, path_val);

  for (p = 0; p < sg->node_n; p++)
    {
      path_val[p] = DBL_MAX;
      sg->node[p].status = STATUS_NOTHING;
    }

  /* node 0 is the arbitrary MST root */
  path_val[0] = 0;
  sg->node[0].pred = NIL;
  real_heap_insert (Q, 0);

  nproto = 0;

  // Prim's algorithm for Minimum Spanning Tree
  while (!real_heap_is_empty (Q))
    {
      real_heap_remove (Q, &p);
      assert (p >= 0 && p < sg->node_n);
      sg->node[p].path_val = path_val[p];
      pred = sg->node[p].pred;

      /* an MST edge crossing two classes makes both endpoints prototypes */
      if (pred != NIL)
        if (sg->node[p].label_true != sg->node[pred].label_true)
          {
            if (sg->node[p].status != STATUS_PROTOTYPE)
              {
                sg->node[p].status = STATUS_PROTOTYPE;
                nproto++;
              }
            if (sg->node[pred].status != STATUS_PROTOTYPE)
              {
                sg->node[pred].status = STATUS_PROTOTYPE;
                nproto++;
              }
          }

      /* relax every remaining node against p (complete graph) */
      for (q = 0; q < sg->node_n; q++)
        {
          if (Q->color[q] != COLOR_BLACK)
            {
              if (p != q)
                {
                  weight = opf_graph_get_distance (sg, &sg->node[p], &sg->node[q]);
                  if (weight < path_val[q])
                    {
                      sg->node[q].pred = p;
                      real_heap_update (Q, q, weight);
                    }
                }
            }
        }
    }

  real_heap_destroy (&Q);
  free (path_val);

  /* the algorithm will work even if there
     is just one class in the training set */
  if (nproto == 0)
    sg->node[0].status = STATUS_PROTOTYPE;
}
/* Train the supervised OPF classifier: find prototypes via the MST, then run
   the Image Foresting Transform with the f-max path cost so every node is
   conquered (and labelled) by its cheapest prototype. Also fills
   sg->ordered_list_of_nodes with nodes in increasing path_val order, which
   the classification routines rely on for early termination. */
void
opf_supervised_train (struct opf_graph * sg)
{
  int p, q, i;
  double tmp, weight;
  struct real_heap *Q = NULL;
  double *path_val = NULL;

  // compute optimum prototypes
  mst_prototypes (sg);

  // initialization: prototypes are roots (cost 0), everything else starts at +inf
  path_val = alloc_double (sg->node_n);
  Q = real_heap_create (sg->node_n, path_val);

  for (p = 0; p < sg->node_n; p++)
    {
      if (sg->node[p].status == STATUS_PROTOTYPE)
        {
          sg->node[p].pred = NIL;
          path_val[p] = 0;
          sg->node[p].label = sg->node[p].label_true;
          real_heap_insert (Q, p);
        }
      else // non-prototypes
        {
          path_val[p] = DBL_MAX;
        }
    }

  // IFT with fmax: path cost is the maximum arc weight along the path
  i = 0;
  while (!real_heap_is_empty (Q))
    {
      real_heap_remove (Q, &p);
      assert (p >= 0 && p < sg->node_n);

      /* nodes leave the heap in nondecreasing cost order */
      sg->ordered_list_of_nodes[i] = p;
      i++;
      sg->node[p].path_val = path_val[p];

      for (q = 0; q < sg->node_n; q++)
        {
          if (p != q)
            {
              if (path_val[p] < path_val[q])
                {
                  weight = opf_graph_get_distance (sg, &sg->node[p], &sg->node[q]);
                  tmp = MAX (path_val[p], weight);
                  if (tmp < path_val[q])
                    {
                      /* p offers q a cheaper path: q inherits p's label */
                      sg->node[q].pred = p;
                      sg->node[q].label = sg->node[p].label;
                      real_heap_update (Q, q, tmp);
                    }
                }
            }
        }
    }

  real_heap_destroy (&Q);
  free (path_val);
}
/* Classification function: classifies the sample_n samples described by feat
   against the trained graph sg_train, writing the predicted labels into
   'label'. 'feat' is either a precomputed distance matrix (when
   sg_train->pdist) or a row-major array of feature vectors. */
void
opf_supervised_classify (struct opf_graph * sg_train, double *feat, int sample_n, int *label)
{
  int i;

  omp_set_num_threads(NTHREADS);
#pragma omp parallel for
  for (i = 0; i < sample_n; i++)
    {
      int c_label = -1;
      int j = 0;
      double minCost = DBL_MAX;

      /* training nodes are visited in increasing path_val order, so the scan
         stops as soon as no remaining node can beat the best cost found */
      for (j=0;
           (j < sg_train->node_n)
           && (minCost > sg_train->node[sg_train->ordered_list_of_nodes[j]].path_val);
           j++)
        {
          int l;
          double tmp, weight;
          l = sg_train->ordered_list_of_nodes[j];
          assert (l >= 0 && l < sg_train->node_n);
          if (sg_train->pdist) /* feat is a distance matrix */
            weight = feat[sg_train->node[l].position*sample_n+i];
          else /* feat are feature vectors */
            weight = sg_train->arc_weight
              (sg_train->node[l].feat, &feat[i*sg_train->feat_n], sg_train->feat_n);

          /* f-max path cost of extending node l's optimum path to sample i */
          tmp = MAX (sg_train->node[l].path_val, weight);
          if (tmp < minCost)
            {
              minCost = tmp;
              c_label = sg_train->node[l].label;
            }
        }
      label[i] = c_label;
    }
}
/* Classify every node of sg_eval against the trained graph sg_train, storing
   the predicted labels in sg_eval->node[i].label. Same algorithm as
   opf_supervised_classify, but distances come from opf_graph_get_distance. */
static void
supervised_classify_opf_graph (struct opf_graph * sg_train, struct opf_graph * sg_eval)
{
  int i;

  /* both graphs must share the same feature/distance configuration */
  assert (sg_train->pdist == sg_eval->pdist);
  assert (sg_train->feat_n == sg_eval->feat_n);
  assert (sg_train->arc_weight == sg_eval->arc_weight);

  omp_set_num_threads(NTHREADS);
#pragma omp parallel for
  for (i = 0; i < sg_eval->node_n; i++)
    {
      int c_label = -1;
      int j = 0;
      double minCost = DBL_MAX;

      /* early termination: nodes are ordered by increasing path_val */
      for (j=0;
           (j < sg_train->node_n)
           && (minCost > sg_train->node[sg_train->ordered_list_of_nodes[j]].path_val);
           j++)
        {
          int l;
          double tmp, weight;
          l = sg_train->ordered_list_of_nodes[j];
          assert (l >= 0 && l < sg_train->node_n);
          weight = opf_graph_get_distance (sg_train, &sg_train->node[l], &sg_eval->node[i]);
          tmp = MAX (sg_train->node[l].path_val, weight);
          if (tmp < minCost)
            {
              minCost = tmp;
              c_label = sg_train->node[l].label;
            }
        }
      sg_eval->node[i].label = c_label;
    }
}
/*
OPF Accuracy Function
*/
/* OPF balanced accuracy: 1 minus the mean, over all classes, of the
   per-class false-positive and false-negative rates. Returns a value in
   [0, 1] where 1 is a perfect classification. */
static double
accuracy (struct opf_graph *sg)
{
  int i;
  /* per-class false positives, false negatives and sample counts
     (calloc zero-initializes, replacing malloc + memset) */
  int *fp = (int *) calloc (sg->label_n, sizeof (int));
  int *fn = (int *) calloc (sg->label_n, sizeof (int));
  int *n = (int *) calloc (sg->label_n, sizeof (int));
  double sum_e = 0.0;

  for (i = 0; i < sg->node_n; i++)
    {
      /* false positives */
      fp[sg->node[i].label] += (sg->node[i].label != sg->node[i].label_true);
      /* false negatives */
      fn[sg->node[i].label_true] += (sg->node[i].label != sg->node[i].label_true);
      /* number of elements per true class */
      n[sg->node[i].label_true] ++;
    }

  /* NOTE(review): assumes every class in [0, label_n) has at least one
     sample and no class covers the whole set, otherwise this divides by 0 */
  for (i = 0; i < sg->label_n; i++)
    {
      /* e_i,1 + e_i,2 */
      sum_e += (double)(fp[i]) / (sg->node_n - n[i]) + (double)(fn[i]) / n[i];
    }

  /* FIX: the three counter arrays were previously leaked */
  free (fp);
  free (fn);
  free (n);

  return 1.0 - sum_e / (2.0 * sg->label_n);
}
/* Replace misclassified nodes of the evaluating set by randomly chosen
   non-prototype nodes of the training set; swapped-in nodes become
   prototypes for the next training round. */
static void
swap_wrong_prototypes (struct opf_graph *sg_train, struct opf_graph *sg_eval)
{
  int i;
  int nonprototypes = 0;

  /* number of training nodes available for swapping */
  for (i = 0; i < sg_train->node_n; i++)
    if (sg_train->node[i].status == STATUS_NOTHING) nonprototypes++;

  /* FIX: 'i' must be reset before scanning the evaluation set; previously it
     kept the value sg_train->node_n from the counting loop above, so the
     scan started past the first evaluation nodes and never ran at all
     whenever the training set was at least as large as the evaluation set. */
  i = 0;
  while (i < sg_eval->node_n && nonprototypes > 0)
    {
      if (sg_eval->node[i].label != sg_eval->node[i].label_true)
        {
          /* XXX: this can take a lot of time */
          int j;
          do
            {
              j = random_int (0, sg_train->node_n);
            }
          while (sg_train->node[j].status == STATUS_PROTOTYPE);
          {
            struct snode tmp = sg_train->node[j];
            sg_train->node[j] = sg_eval->node[i];
            sg_eval->node[i] = tmp;
          }
          sg_train->node[j].pred = NIL;
          sg_train->node[j].status = STATUS_PROTOTYPE;
          nonprototypes--;
        }
      i++;
    }
}
/* Create two views (sg1, sg2) over the same opf_graph data: sg1 covers the
   first split-fraction of the nodes, sg2 the remainder. No node data is
   copied — both views alias sg's node and ordered-list arrays, which is what
   lets the iterative/agglomerative trainers move nodes between the two parts
   by swapping in place. */
static void
opf_graph_split_mirrored (struct opf_graph * sg, double split,
                          struct opf_graph * sg1, struct opf_graph * sg2)
{
  /* poison all fields first so any member not set below is obviously invalid */
  memset (sg1, 0xFF, sizeof(struct opf_graph));
  memset (sg2, 0xFF, sizeof(struct opf_graph));
  sg1->node_n = sg->node_n * split;
  sg2->node_n = sg->node_n - sg1->node_n;
  sg1->node = &sg->node[0];
  sg2->node = &sg->node[sg1->node_n];
  sg1->ordered_list_of_nodes = &sg->ordered_list_of_nodes[0];
  sg2->ordered_list_of_nodes = &sg->ordered_list_of_nodes[sg1->node_n];
  /* both halves share the parent's feature/distance configuration */
  sg1->arc_weight = sg2->arc_weight = sg->arc_weight;
  sg1->feat_data = sg2->feat_data = sg->feat_data;
  sg1->feat_n = sg2->feat_n = sg->feat_n;
  sg1->pdist = sg2->pdist = sg->pdist;
  sg1->pdist_train_stride = sg2->pdist_train_stride = sg->pdist_train_stride;
}
#define ITER_MAX 10
/* Iterative (learning) training: split sg into mirrored train/eval parts,
   then repeatedly train, classify the eval part, and swap misclassified
   eval nodes with random non-prototypes from the train part, until the
   evaluation accuracy stops changing or ITER_MAX rounds are reached.
   On return sg is resized so that only the training part remains. */
void
opf_supervised_train_iterative (struct opf_graph *sg, double split)
{
  int i = 0;
  /* DBL_MIN sentinels guarantee the first delta is non-zero */
  double acc = DBL_MIN, acc_prev = DBL_MIN, delta;
  struct opf_graph sg_train, sg_eval;

  opf_graph_split_mirrored (sg, split, &sg_train, &sg_eval);

  do
    {
      acc_prev = acc;
      opf_supervised_train (&sg_train);
      supervised_classify_opf_graph (&sg_train, &sg_eval);
      acc = accuracy (&sg_eval);
      swap_wrong_prototypes (&sg_train, &sg_eval);
      delta = fabs(acc-acc_prev);
      i++;
    }
  while ((delta > DBL_EPSILON) && (i < ITER_MAX));

  /* just the training part will remain */
  opf_graph_resize (sg, sg_train.node_n);
}
/* Move every misclassified node from the eval view into the train view,
   growing sg_train and shrinking sg_eval in place (the two views are
   mirrored over the same array, so sg_train->node[sg_train->node_n] is
   sg_eval->node[0]). *n receives the number of misclassified nodes found. */
static void
move_misclassified_nodes (struct opf_graph *sg_train, struct opf_graph *sg_eval, int *n)
{
  int i;
  int misclassified_n = 0;

  /* count number of misclassified samples in eval */
  for (i=0; i < sg_eval->node_n; i++)
    (sg_eval->node[i].label != sg_eval->node[i].label_true)?
      misclassified_n++ : 0;

  *n = misclassified_n;

  /* nothing to do */
  if (misclassified_n == 0)
    return;

  /* move wrong labelled samples from sg_eval to sg_train.
     NOTE(review): each move shifts sg_eval->node while 'i' keeps advancing,
     so the element right after a moved node is skipped in this pass; the
     agglomerative driver re-invokes this function until *n == 0, which
     presumably compensates — verify intended. */
  for (i=0; i < sg_eval->node_n; i++)
    {
      if (sg_eval->node[i].label != sg_eval->node[i].label_true)
        {
          /* have in mind that sg_train and sg_eval
             are mirrored to the same opf_graph, so
             sg_train->node[sg_train->node_n] == sg_eval->node[0] */
          {
            struct snode tmp = sg_train->node[sg_train->node_n];
            sg_train->node[sg_train->node_n] = sg_eval->node[i];
            sg_eval->node[i] = tmp;
          }
          sg_train->node_n++;
          sg_eval->node_n--;
          /* sg_train took the first element of sg_eval */
          sg_eval->node++;
          sg_eval->ordered_list_of_nodes++;
        }
    }
}
/* Agglomerative training: split sg into mirrored train/eval parts and keep
   absorbing misclassified evaluation nodes into the training part until the
   whole evaluation part is classified correctly. Only the (grown) training
   part of sg is kept on return. */
void
opf_supervised_train_agglomerative (struct opf_graph *sg, double split)
{
  int misclassified;
  struct opf_graph sg_train, sg_eval;

  opf_graph_split_mirrored (sg, split, &sg_train, &sg_eval);

  /* retrain until no evaluation node is misclassified */
  for (;;)
    {
      misclassified = 0;
      opf_supervised_train (&sg_train);
      supervised_classify_opf_graph (&sg_train, &sg_eval);
      move_misclassified_nodes (&sg_train, &sg_eval, &misclassified);
      if (misclassified == 0)
        break;
    }

  /* just the training part will remain */
  opf_graph_resize (sg, sg_train.node_n);
}
|
matrix_s.h | //
// matrix.cpp
// Define Class for Vector & Matrix
//
// Created by Yoshi Miyazaki on 2015/04/11.
//
#include "matrix.h"
/*----------------------------------------
Vector Types Constructors
---------------------------------------*/
/* Default constructor: empty vector, no allocation. */
template<class T>
Vector1d<T>::Vector1d(){
    n = 0;
    v = 0;
}
/* Size constructor: allocates nn elements, values left uninitialized. */
template<class T>
Vector1d<T>::Vector1d(int nn){
    n = nn;
    v = new T[n];
}
/* Fill constructor: nn copies of `a`. */
template<class T>
Vector1d<T>::Vector1d(const T& a, int nn){
    n = nn;
    v = new T[nn];
    for (int i=0; i<nn; i++){
        v[i] = a;
    }
}
/* Array constructor: copies nn elements from raw array `a`.
   Caller must guarantee `a` holds at least nn elements. */
template<class T>
Vector1d<T>::Vector1d(const T* a, int nn){
    n = nn;
    v = new T[n];
    for (int i=0; i<nn; i++){
        v[i] = *a++;
    }
}
/* Copy constructor: deep-copies the other vector's buffer.
   (The source text contained a corrupted "&copy;" HTML entity, rendered
   as "©", in place of `&copy` — restored here so the file compiles.) */
template<class T>
Vector1d<T>::Vector1d(const Vector1d<T> &copy){
    n = copy.n;
    v = new T[n];
    for (int i=0; i<n; i++){
        v[i] = copy[i];
    }
}
/*----------------------------------------
Operator
---------------------------------------*/
/* Copy assignment: reallocates only when sizes differ, then deep-copies.
   Self-assignment is a no-op.
   (Restores `&copy` where the source text had the mojibake "©" —
   a corrupted "&copy;" HTML entity — which does not compile.) */
template<class T>
Vector1d<T>& Vector1d<T>::operator=(const Vector1d<T> &copy){
    if (this != &copy){
        if (n != copy.n){
            if (v != 0) delete[] v;
            n = copy.n;
            v = new T[n];
        }
        for (int i=0; i<n; i++){
            v[i] = copy[i];
        }
    }
    return *this;
}
/* Broadcast assignment: every element becomes a copy of `a`. */
template<class T>
Vector1d<T>& Vector1d<T>::operator=(const T &a){
    int i = 0;
    while (i < n){
        v[i] = a;
        ++i;
    }
    return *this;
}
/* Element-wise equality: vectors compare equal iff sizes match and all
   elements match.  (Return type `const bool` mirrors the declaration in
   matrix.h and is kept for ABI/signature consistency.) */
template<class T>
const bool Vector1d<T>::operator==(const Vector1d<T>& rhs) const{
    if (n != rhs.n){
        return 0;
    }
    else{
        bool b = 1;
        for (int i=0; i<n; i++){
            if (v[i] != rhs[i]){
                b = 0;
                break;
            }
        }
        return b;
    }
}
/* Resize to nn elements.  Existing contents are NOT preserved and the new
   elements are left uninitialized; no-op when the size already matches. */
template<class T>
void Vector1d<T>::resize(int nn){
    if (n != nn){
        if (v != 0){
            delete[] v;
        }
        n = nn;
        v = new T[n];
    }
}
/* Resize to nn elements, preserving existing values; elements beyond the
   old size are initialized to `a`.
   Bug fix: the original copied `n_old` elements back into the new buffer
   unconditionally, writing past the end when shrinking (nn < n_old). */
template<class T>
void Vector1d<T>::resize(const T& a, int nn){
    T *copy = new T[n];
    for (int i=0; i<n; i++){ copy[i] = v[i]; }
    int n_old = n;
    if (n != nn){
        if (v != 0){ delete[] v; }
        n = nn;
        v = new T[n];
    }
    /* copy back only as many old elements as fit in the new buffer */
    int n_keep = (n_old < n) ? n_old : n;
    for (int i=0; i<n_keep; i++){ v[i] = copy[i]; }
    for (int i=n_old; i<n; i++){ v[i] = a; }
    delete[] copy;
}
/* Remove element `ir`, shifting later elements left; invalid indices are
   ignored.  Implemented by snapshotting, reallocating one smaller, and
   copying the two halves around the removed slot. */
template<class T>
void Vector1d<T>::erase(int ir){
    if (ir < 0 || n <= ir){ return; } /* if index is outside the range */
    T *copy = new T[n];
    for (int i=0; i<n; i++){ copy[i] = v[i]; }
    if (v != 0){ delete[] v; }
    n--; v = new T[n];
    for (int i=0; i<ir; i++){ v[i] = copy[i]; }
    /* copy[i+1] stays in bounds: copy has n+1 (old-size) elements */
    for (int i=ir; i<n; i++){ v[i] = copy[i+1]; }
    if (copy != 0){ delete[] copy; }
}
/*----------------------------------------
Mathematical Operator
---------------------------------------*/
/* Euclidean (L2) norm: sqrt of the sum of squared elements. */
template<class T>
const T Vector1d<T>::norm() const{
    T norm = 0;
    for (int i=0; i<n; i++){
        norm += v[i]*v[i];
    }
    return sqrt(norm);
}
/* Largest element value.  NOTE(review): reads v[0] — assumes n >= 1. */
template<class T>
const T Vector1d<T>::maxv() const{
    T maxv = v[0];
    for (int i=1; i<n; i++){
        if (maxv < v[i]){maxv = v[i];}
    }
    return maxv;
}
/* Smallest element value.  Assumes n >= 1. */
template<class T>
const T Vector1d<T>::minv() const{
    T minv = v[0];
    for (int i=1; i<n; i++){
        if (minv > v[i]){minv = v[i];}
    }
    return minv;
}
/* Index of the largest element (first occurrence).  Assumes n >= 1. */
template<class T>
const int Vector1d<T>::maxw() const{
    T maxv = v[0]; int maxw = 0;
    for (int i=1; i<n; i++){
        if (maxv < v[i]){maxv = v[i]; maxw = i;}
    }
    return maxw;
}
/* Index of the smallest element (first occurrence).  Assumes n >= 1. */
template<class T>
const int Vector1d<T>::minw() const{
    T minv = v[0]; int minw = 0;
    for (int i=1; i<n; i++){
        if (minv > v[i]){minv = v[i]; minw = i;}
    }
    return minw;
}
/* Sum of all elements. */
template<class T>
const T Vector1d<T>::sum() const{
    T tot = 0;
    for (int i=0; i<n; i++){ tot += v[i]; }
    return tot;
}
/* Arithmetic mean.  NOTE(review): divides by n — assumes n >= 1. */
template<class T>
const T Vector1d<T>::average() const{
    T ave = 0;
    for (int i=0; i<n; i++){
        ave += v[i];
    }
    return ave/double(n);
}
template<class T> /* maximum of abs(v[i]) */
const T Vector1d<T>::absmaxv() const{
    T maxv = abs(v[0]);
    for (int i=1; i<n; i++){
        if (maxv < abs(v[i])){maxv = abs(v[i]);}
    }
    return maxv;
}
template<class T> /* minimum of abs(v[i]) */
const T Vector1d<T>::absminv() const{
    T minv = abs(v[0]);
    for (int i=1; i<n; i++){
        if (minv > abs(v[i])){minv = abs(v[i]);}
    }
    return minv;
}
template<class T> /* minimum of abs(v[i]), ignoring zero elements;
                     falls back to absmaxv() when all elements are zero */
const T Vector1d<T>::absnon0minv() const{
    T minv = absmaxv();
    for (int i=0; i<n; i++){
        if ((minv > abs(v[i])) && (v[i] != 0)){minv = abs(v[i]);}
    }
    return minv;
}
template<class T> /* average of abs(v[i]) */
const T Vector1d<T>::absaverage() const{
    T ave = 0;
    for (int i=0; i<n; i++){
        ave += (v[i]>0 ? v[i] : -1.0*v[i]);
    }
    return ave/double(n);
}
template<class T> /* dot product; exits the program on size mismatch */
const T Vector1d<T>::operator*(const Vector1d<T>& A) const{
    int nA; nA = A.size();
    T dotp = 0;
    if (nA != n){
        cout << "size of vectors don't match (*). Revise your input." << endl;
        exit(7);
    }
    else{
        for (int i=0; i<n; i++){
            dotp += v[i]*A[i];
        }
        return dotp;
    }
}
/* True if any element is NaN (element type must be floating-point). */
template<class T>
const bool Vector1d<T>::isnan() const{
    bool isNAN = false;
    for (int i=0; i<n; i++){
        T current = v[i];
        if(std::isnan(current)){ isNAN = true; break; }
    }
    return isNAN;
}
/* Element-wise vector addition; exits the program on size mismatch.
   Bug fix: the local result was declared Vector1d<double>, which only
   compiles when T == double; it is now Vector1d<T>. */
template<class T>
const Vector1d<T> Vector1d<T>::operator+(const Vector1d<T>& A){
    int nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (+). Revise your input." << endl;
        exit(7);
    }
    else{
        Vector1d<T> sum(n);
        for (int i=0; i<n; i++){ sum[i] = v[i] + A[i]; }
        return sum;
    }
}
/* const overload of element-wise vector addition (same fix as above). */
template<class T>
const Vector1d<T> Vector1d<T>::operator+(const Vector1d<T>& A) const{
    int nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (+). Revise your input." << endl;
        exit(7);
    }
    else{
        Vector1d<T> sum(n);
        for (int i=0; i<n; i++){ sum[i] = v[i] + A[i]; }
        return sum;
    }
}
/* Element-wise vector subtraction; exits the program on size mismatch.
   Bug fix: result type changed from Vector1d<double> to Vector1d<T> so
   the template instantiates for any element type. */
template<class T>
const Vector1d<T> Vector1d<T>::operator-(const Vector1d<T>& A){
    int nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (-). Revise your input." << endl;
        exit(7);
    }
    else{
        Vector1d<T> sum(n);
        for (int i=0; i<n; i++){ sum[i] = v[i] - A[i]; }
        return sum;
    }
}
/* const overload of element-wise vector subtraction (same fix). */
template<class T>
const Vector1d<T> Vector1d<T>::operator-(const Vector1d<T>& A) const{
    int nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (-). Revise your input." << endl;
        exit(7);
    }
    else{
        Vector1d<T> sum(n);
        for (int i=0; i<n; i++){ sum[i] = v[i] - A[i]; }
        return sum;
    }
}
/* Scalar addition/subtraction applied element-wise.
   Bug fix in all four overloads: the result was declared Vector1d<double>,
   which only compiles for T == double; now Vector1d<T>. */
template<class T>
const Vector1d<T> Vector1d<T>::operator+(const T& A){
    Vector1d<T> sum(n);
    for (int i=0; i<n; i++){
        sum[i] = v[i] + A;
    }
    return sum;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator+(const T& A) const{
    Vector1d<T> sum(n);
    for (int i=0; i<n; i++){
        sum[i] = v[i] + A;
    }
    return sum;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator-(const T& A){
    Vector1d<T> sum(n);
    for (int i=0; i<n; i++){
        sum[i] = v[i] - A;
    }
    return sum;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator-(const T& A) const{
    Vector1d<T> sum(n);
    for (int i=0; i<n; i++){
        sum[i] = v[i] - A;
    }
    return sum;
}
/* Scalar multiplication/division applied element-wise.
   Bug fix in all four overloads: result type corrected from
   Vector1d<double> to Vector1d<T>. */
template<class T>
const Vector1d<T> Vector1d<T>::operator*(const T& A){
    Vector1d<T> product(n);
    for (int i=0; i<n; i++){
        product[i] = v[i] * A;
    }
    return product;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator*(const T& A) const{
    Vector1d<T> product(n);
    for (int i=0; i<n; i++){
        product[i] = v[i] * A;
    }
    return product;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator/(const T& A){
    Vector1d<T> quotient(n);
    for (int i=0; i<n; i++){
        quotient[i] = v[i] / A;
    }
    return quotient;
}
template<class T>
const Vector1d<T> Vector1d<T>::operator/(const T& A) const{
    Vector1d<T> quotient(n);
    for (int i=0; i<n; i++){
        quotient[i] = v[i] / A;
    }
    return quotient;
}
/* In-place element-wise addition of another vector; exits on size mismatch. */
template<class T>
Vector1d<T>& Vector1d<T>::operator+=(const Vector1d<T>& A){
    int nA;
    nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (+=). Revise your input." << endl;
        exit(7);
    }
    else{
        for (int i=0; i<n; i++){
            v[i] += A[i];
        }
        return *this;
    }
}
/* In-place addition of a scalar to every element. */
template<class T>
Vector1d<T>& Vector1d<T>::operator+=(const T& a){
    for (int i=0; i<n; i++){
        v[i] += a;
    }
    return *this;
}
/* In-place element-wise subtraction of another vector; exits on size mismatch. */
template<class T>
Vector1d<T>& Vector1d<T>::operator-=(const Vector1d<T>& A){
    int nA;
    nA = A.size();
    if (nA != n){
        cout << "size of vectors don't match (-=). Revise your input." << endl;
        exit(7);
    }
    else{
        for (int i=0; i<n; i++){
            v[i] -= A[i];
        }
        return *this;
    }
}
/* In-place subtraction of a scalar from every element. */
template<class T>
Vector1d<T>& Vector1d<T>::operator-=(const T& a){
    for (int i=0; i<n; i++){
        v[i] -= a;
    }
    return *this;
}
/* In-place multiplication of every element by a scalar. */
template<class T>
Vector1d<T>& Vector1d<T>::operator*=(const T& a){
    for (int i=0; i<n; i++){
        v[i] *= a;
    }
    return *this;
}
/* In-place division of every element by a scalar. */
template<class T>
Vector1d<T>& Vector1d<T>::operator/=(const T& a){
    for (int i=0; i<n; i++){
        v[i] /= a;
    }
    return *this;
}
/* Convert to a tensor1d of the same length, copying element by element. */
template<class T>
tensor1d<T> Vector1d<T>::to_tensor(){
    tensor1d<T> conv(n);
    int i=0;
    for (auto it=conv.begin(); it!=conv.end(); it++){
        *it = v[i]; i++;
    }
    return conv;
}
/*----------------------------------------
Destructers
---------------------------------------*/
/* Destructor: release the heap buffer.  (delete[] on a null pointer is
   already a no-op, but the explicit guard mirrors the rest of the file.) */
template<class T>
Vector1d<T>::~Vector1d<T>(){
    if (v != 0) delete[] v;
}
/*----------------------------------------
Matrix Types Constructers
---------------------------------------*/
/* Default constructor: empty 0x0 matrix, no allocation. */
template<class T>
Matrix<T>::Matrix(){
    n = 0; m = 0;
    v = 0;
}
/* Size constructor: one contiguous n*m block addressed through a table of
   row pointers (v[i] points into v[0]'s block).  Values uninitialized.
   NOTE(review): writes v[0] — assumes nn >= 1. */
template<class T>
Matrix<T>::Matrix(int nn, int mm){
    n = nn; m = mm;
    v = new T*[n];
    v[0] = new T[m*n];
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
}
/* Fill constructor: every element set to `a`. */
template<class T>
Matrix<T>::Matrix(const T &a, int nn, int mm){
    n = nn; m = mm;
    v = new T*[n];
    v[0] = new T[m*n];
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] = a;
        }
    }
}
/* Array constructor: copies nn*mm elements row-major from raw array `a`. */
template<class T>
Matrix<T>::Matrix(const T *a, int nn, int mm){
    n = nn; m = mm;
    v = new T*[n];
    v[0] = new T[m*n];
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] = *a++;
        }
    }
}
/* Copy constructor: deep-copies the contiguous block and rebuilds the
   row-pointer table.  (The source text contained a corrupted "&copy;"
   HTML entity, rendered as "©", in place of `&copy` — restored here.) */
template<class T>
Matrix<T>::Matrix(const Matrix &copy){
    n = copy.n; m = copy.m;
    v = new T*[n];
    v[0] = new T[m*n];
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] = copy[i][j];
        }
    }
}
/*----------------------------------------
Operater
---------------------------------------*/
/* Copy assignment: reallocates only when dimensions differ, then rebuilds
   the row-pointer table and deep-copies.  Self-assignment is a no-op.
   Bug fixes: (1) storage allocated with new[] was released with scalar
   `delete` (undefined behavior) — now delete[]; (2) restored `&copy`
   where the source had the mojibake "©" (corrupted "&copy;" entity). */
template<class T>
Matrix<T>& Matrix<T>:: operator=(const Matrix<T> &copy){
    if (this != &copy){
        if (n != copy.n || m != copy.m){
            if (v != 0){
                delete[] v[0];
                delete[] v;
            }
            n = copy.n;
            m = copy.m;
            v = new T*[n];
            v[0] = new T[n*m];
        }
        for (int i=1; i<n; i++){
            v[i] = v[i-1] + m;
        }
        for (int i=0; i<n; i++){
            for (int j=0; j<m; j++){
                v[i][j] = copy[i][j];
            }
        }
    }
    return *this;
}
/* Broadcast assignment: every element becomes a copy of `r`. */
template<class T>
Matrix<T>& Matrix<T>:: operator=(const T &r){
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] = r;
        }
    }
    return *this;
}
/* Resize to nn x mm.  Contents are NOT preserved; the row-pointer table
   is rebuilt.  No reallocation when dimensions already match.
   Bug fix: new[]-allocated storage was released with scalar `delete`
   (undefined behavior) — now delete[]. */
template<class T>
void Matrix<T>::resize(int nn, int mm){
    if (n != nn || m != mm){
        if (v != 0){
            delete[] v[0];
            delete[] v;
        }
        n = nn;
        m = mm;
        v = new T*[n];
        v[0] = new T[n*m];
    }
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
}
/* Resize to nn x mm and set every element to `a`.
   Bug fix: new[]-allocated storage was released with scalar `delete`
   (undefined behavior) — now delete[]. */
template<class T>
void Matrix<T>::resize(const T& a, int nn, int mm){
    if (n != nn || m != mm){
        if (v != 0){
            delete[] v[0];
            delete[] v;
        }
        n = nn;
        m = mm;
        v = new T*[n];
        v[0] = new T[n*m];
    }
    for (int i=1; i<n; i++){
        v[i] = v[i-1] + m;
    }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] = a;
        }
    }
}
/* Append `add` as a new last row.  The column count must match (or the
   matrix must be empty, in which case it becomes 1 x add.size()).
   Grows by snapshotting into tmp, reallocating one row larger, and
   copying back. */
template<class T>
void Matrix<T>::add_row(Vector1d<double>& add){
    if (m != add.size()){
        if (m > 0){
            cout << "matrix_s.h: add_row() - vector size unmatch. m = " << m;
            cout << " , add.size() = " << add.size() << endl;
            exit(1);
        } else {
            /* empty matrix: adopt the vector's length as the column count */
            resize(1,add.size());
            for (int j=0; j<m; j++){ v[0][j] = add[j]; }
            // cout << "row = " << nrows() << " , col = " << mcols() << endl;
            return;
        }
    }
    /* copy data to tmp */
    T** tmp = new T*[n];
    tmp[0] = new T[m*n];
    for (int i=1; i<n; i++){ tmp[i] = tmp[i-1] + m; }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){ tmp[i][j] = v[i][j]; }
    }
    /* create new v */
    if (v != 0){
        if (m != 0){ delete[] v[0]; }
        delete[] v;
    }
    n++;
    v = new T*[n];
    v[0] = new T[m*n];
    /* copy data */
    for (int i=1; i<n; i++){ v[i] = v[i-1] + m; }
    for (int i=0; i<(n-1); i++){
        for (int j=0; j<m; j++){ v[i][j] = tmp[i][j]; }
    }
    for (int j=0; j<m; j++){ v[n-1][j] = add[j]; }
    delete[] tmp[0];
    delete[] tmp;
}
/* Remove row `ir`, shifting later rows up.
   Bug fixes: (1) an out-of-range index was not rejected (the companion
   Vector1d::erase checks it); (2) erasing the only row previously wrote
   v[0] into a zero-length array — an empty matrix is now represented as
   v == 0, matching the default constructor and destructor. */
template<class T>
void Matrix<T>::erase_row(int ir){
    if (n == 0){ return; }
    if (ir < 0 || n <= ir){ return; } /* index outside the range: no-op */
    /* copy data to tmp */
    T** tmp = new T*[n];
    tmp[0] = new T[m*n];
    for (int i=1; i<n; i++){ tmp[i] = tmp[i-1] + m; }
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){ tmp[i][j] = v[i][j]; }
    }
    /* release old storage */
    if (v != 0){
        if (m != 0){ delete[] v[0]; }
        delete[] v;
    }
    n--;
    if (n == 0){
        /* last row removed: leave an empty matrix */
        v = 0;
        delete[] tmp[0];
        delete[] tmp;
        return;
    }
    v = new T*[n]; v[0] = new T[m*n];
    for (int i=1; i<n; i++){ v[i] = v[i-1] + m; }
    /* copy data around the removed row */
    for (int i=0; i<ir; i++){
        for (int j=0; j<m; j++){ v[i][j] = tmp[i][j]; }
    }
    for (int i=ir; i<n; i++){
        for (int j=0; j<m; j++){ v[i][j] = tmp[i+1][j]; }
    }
    delete[] tmp[0];
    delete[] tmp;
}
/*----------------------------------------
Return row & column vector
---------------------------------------*/
/* Extract column j as a Vector1d of length n (no bounds check on j). */
template<class T>
Vector1d<T> Matrix<T>::colvector(const int j) const{
    Vector1d<T> rowv(n);
    for (int i=0; i<n; i++){ rowv[i] = v[i][j]; }
    return rowv;
}
/* Extract row i as a Vector1d of length m (no bounds check on i). */
template<class T>
Vector1d<T> Matrix<T>::rowvector(const int i) const{
    Vector1d<T> colv(m);
    for (int j=0; j<m; j++){ colv[j] = v[i][j]; }
    return colv;
}
/* Overwrite row i with the contents of _v (assumes _v.size() >= m). */
template<class T>
void Matrix<T>::setrowvector(const int i, const Vector1d<T>& _v){
    for (int j=0; j<m; j++){ v[i][j] = _v[j]; }
}
/* Overwrite column j with the contents of _v (assumes _v.size() >= n). */
template<class T>
void Matrix<T>::setcolvector(const int j, const Vector1d<T>& _v){
    for (int i=0; i<n; i++){ v[i][j] = _v[i]; }
}
/* Extract column j as a tensor1d of length n. */
template<class T>
tensor1d<T> Matrix<T>::coltensor(const int j) const{
    tensor1d<T> rowv(n);
    for (int i=0; i<n; i++){ rowv[i] = v[i][j]; }
    return rowv;
}
/* Extract row i as a tensor1d of length m. */
template<class T>
tensor1d<T> Matrix<T>::rowtensor(const int i) const{
    tensor1d<T> colv(m);
    for (int j=0; j<m; j++){ colv[j] = v[i][j]; }
    return colv;
}
/* Overwrite row i from a tensor1d; warns (but proceeds) on size mismatch. */
template<class T>
void Matrix<T>::setrowtensor(const int i, const tensor1d<T>& _v){
    if (m != (int)_v.size()){
        cout << "error in `setrowvector`: wrontg input tensor size. ";
        cout << m << " <-> " << _v.size() << endl;
    }
    for (int j=0; j<m; j++){ v[i][j] = _v[j]; }
}
/* Overwrite column j from a tensor1d (assumes _v.size() >= n). */
template<class T>
void Matrix<T>::setcoltensor(const int j, const tensor1d<T>& _v){
    for (int i=0; i<n; i++){ v[i][j] = _v[i]; }
}
/*----------------------------------------
Mathematical Operater
---------------------------------------*/
/* Return a new m-by-n matrix with rows and columns exchanged. */
template<class T>
Matrix<T> Matrix<T>::transpose(){
    Matrix<T> tran(m,n);
    for (int row=0; row<n; ++row){
        for (int col=0; col<m; ++col){
            tran[col][row] = v[row][col];
        }
    }
    return tran;
}
/* LU decomposition (Crout-style, no pivoting) of a square matrix.
   Returns L and U packed into one matrix; the original is unchanged.
   Exits if the matrix is not square.  NOTE(review): no pivoting — a
   zero on the diagonal of lu causes division by zero. */
template<class T>
Matrix<T> Matrix<T>::lu_decomp(){
    if (m != n){
        cout << "unable to calculate the inverse" << endl;
        exit(25);
    }
    Matrix<T> lu(m,m);
    /* LU decomposition */
    for (int i=0; i<m; i++){
        /* calculate l_ij */
        for (int j=i; j<m; j++){
            lu[j][i] = v[j][i];
            for (int k=0; k<i; k++){
                lu[j][i] -= lu[k][i]*lu[j][k];
            }
        }
        /* calculate u_ij */
        for (int j=i+1; j<m; j++){
            lu[i][j] = v[i][j];
            for (int k=0; k<i; k++){
                lu[i][j] -= lu[k][j]*lu[i][k];
            }
            lu[i][j] /= lu[i][i];
        }
    }
    return lu;
}
/* Solve the linear system in place: `this` must already hold a packed LU
   decomposition (see lu_decomp); A enters as the right-hand side and
   leaves as the solution.  Forward substitution, then back substitution. */
template<class T>
void Matrix<T>::lu_linear(Vector1d<T>& A){
    /* calculate solution */
    for (int i=0; i<n; i++){
        for (int k=0; k<i; k++){ A[i] -= v[i][k]*A[k]; }
        A[i] /= v[i][i];
    }
    for (int i=n-1; i>=0; i--){
        for (int k=i+1; k<n; k++){
            A[i] -= v[i][k]*A[k];
        }
    }
}
/* Invert a matrix that already holds a packed LU decomposition: solves
   LU * x = e_j column by column against the identity.  Exits if not
   square. */
template<class T>
Matrix<T> Matrix<T>::lu_inverse(){
    /* matrix should already been LU decomposed */
    if (m != n){
        cout << "unable to calculate the inverse" << endl;
        exit(25);
    }
    /* prepare identiy matrix */
    Matrix<T> inv(0.0,m,m);
    for (int i=0; i<m; i++){
        inv[i][i] = 1.0;
    }
    /* calculate inverse: forward then back substitution per column */
    for (int j=0; j<m; j++){
        for (int i=0; i<n; i++){
            for (int k=0; k<i; k++){ inv[i][j] -= v[i][k]*inv[k][j]; }
            inv[i][j] /= v[i][i];
        }
        for (int i=n-1; i>=0; i--){
            for (int k=i+1; k<n; k++){
                inv[i][j] -= v[i][k]*inv[k][j];
            }
        }
    }
    return inv;
}
/* Zero out elements that are numerically negligible: anything smaller in
   magnitude than (max |element|) * LIM * 16 is treated as round-off noise
   and set to 0.  Returns *this for chaining. */
template<class T>
Matrix<T>& Matrix<T>::numeric0(double LIM){
    /* find abs max value in matrix */
    T absmaxv = 0.0;
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            if (abs(v[i][j]) > absmaxv) {absmaxv = abs(v[i][j]);}
        }
    }
    /* drop off all numeric error */
    T eps = absmaxv*LIM*16;
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            if (abs(v[i][j]) < eps && v[i][j] != 0){ v[i][j] = 0; }
        }
    }
    return *this;
}
/* In-place element-wise matrix addition; exits on dimension mismatch. */
template<class T>
Matrix<T>& Matrix<T>::operator+=(const Matrix<T>& B){
    int nB = B.nrows();
    int mB = B.mcols();
    if ((nB != n) || (mB != m)){
        cout << "size of matrixes don't match (+=). Revise your input." << endl;
        exit(7);
    }
    else {
        for (int i=0; i<n; i++){
            for (int j=0; j<m; j++){
                v[i][j] += B[i][j];
            }
        }
        return *this;
    }
}
/* In-place element-wise matrix subtraction; exits on dimension mismatch. */
template<class T>
Matrix<T>& Matrix<T>::operator-=(const Matrix<T>& B){
    int nB = B.nrows();
    int mB = B.mcols();
    if ((nB != n) || (mB != m)){
        cout << "size of matrixes don't match (-=). Revise your input." << endl;
        exit(7);
    }
    else {
        for (int i=0; i<n; i++){
            for (int j=0; j<m; j++){
                v[i][j] -= B[i][j];
            }
        }
        return *this;
    }
}
/* In-place multiplication of every element by a scalar. */
template<class T>
Matrix<T>& Matrix<T>::operator*=(const T& a){
    for (int i=0; i<n; i++){
        for (int j=0; j<m; j++){
            v[i][j] *= a;
        }
    }
    return *this;
}
/* Matrix-vector product (n x m) * (m) -> (n); exits on size mismatch. */
template<class T>
Vector1d<T> Matrix<T>::operator*(Vector1d<T> &A){
    int nA;
    nA = A.size();
    // cout << n << m << nB << mB << endl;
    if (nA != m){
        cout << "size of matrix & vector don't match (*). Revise your input. sizes: " << m << " & " << nA << endl;
        exit(7);
    }
    else{
        Vector1d<T> product(n);
        for (int i=0; i<n; i++){
            product[i] = 0;
            for (int k=0; k<m; k++){
                product[i] += v[i][k]*A[k];
            }
        }
        return product;
    }
}
/* Matrix-tensor product, same contract as the Vector1d overload. */
template<class T>
tensor1d<T> Matrix<T>::operator*(tensor1d<T> &A){
    size_t nA = A.size();
    if ((int)nA != m){
        cout << "size of matrix & vector don't match (*). sizes: " << m << " & " << nA << endl;
        exit(7);
    }
    else{
        tensor1d<T> product(n);
        for (int i=0; i<n; i++){
            product[i] = 0;
            for (int k=0; k<m; k++){
                product[i] += v[i][k]*A[k];
            }
        }
        return product;
    }
}
/* Matrix-matrix product (n x m) * (m x mB) -> (n x mB); exits if the
   inner dimensions disagree.  Triple loop, i-j-k order. */
template<class T>
Matrix<T> Matrix<T>::operator*(Matrix<T> &B){
    int nB, mB;
    nB = B.nrows(); mB = B.mcols();
    // cout << n << m << nB << mB << endl;
    if (nB != m){
        cout << "size of matricies don't match (*). Revise. " << nB << " x " << m << endl;
        exit(7);
    }
    else{
        Matrix<T> product(n,mB); int i,j,k;
        // int NUM_THREADS=omp_get_num_procs();
        // omp_set_num_threads(NUM_THREADS);
        // #pragma omp parallel for private(j,k)
        for (i=0; i<n; i++){
            for (j=0; j<mB; j++){
                product[i][j] = 0;
                for (k=0; k<m; k++){
                    product[i][j] += v[i][k]*B[k][j];
                }
            }
        }
        return product;
    }
}
/*----------------------------------------
Destructers
---------------------------------------*/
/* Destructor: release the contiguous data block, then the row-pointer
   table.  The m != 0 guard matches how empty matrices are constructed. */
template<class T>
Matrix<T>::~Matrix<T>(){
    if (v!=0){
        if (m!=0){
            delete[] v[0];
        }
        delete[] v;
    }
}
|
gravity_hard.h | #pragma once
#define CALCFORCEFROMALLMEMBERS
#include "cutfunc.h"
/* Apply each particle's velocity kick, parallelised over local particles. */
template <class Tpsys>
void velKick(Tpsys & pp){
    const PS::S32 n = pp.getNumberOfParticleLocal();
#pragma omp parallel for
    for(PS::S32 i=0; i<n; i++){
        pp[i].velKick();
    }
}
#ifdef CORRECT_NEIGHBOR
/* Second-order velocity-kick variant, compiled only with CORRECT_NEIGHBOR. */
template <class Tpsys>
void velKick2nd(Tpsys & pp){
    const PS::S32 n = pp.getNumberOfParticleLocal();
#pragma omp parallel for
    for(PS::S32 i=0; i<n; i++){
        pp[i].velKick2nd();
    }
}
#endif
/* Gravity from the central star (taken to sit at the origin) on particle
   pp, softened by eps2_sun: fills phi_s, acc_s, jerk_s (and snap_s when
   INTEGRATE_6TH_SUN is enabled).  alpha = (r.v)/r^2 is the standard
   Hermite-scheme auxiliary used for the jerk/snap terms. */
template <class Tpsys>
void calcStarGravity(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec veli = pp.vel;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec acci = pp.acc_;
#endif
    /* separation measured from the star at the origin */
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec da = - acci;
#endif
    PS::F64 r2inv = 1. / (dr*dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    //PS::F64 r5inv = r3inv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
#ifdef INTEGRATE_6TH_SUN
    PS::F64 beta = (dv*dv + dr*da) * r2inv - 5. * alpha*alpha;
#endif
    pp.phi_s  = -m_sun * rinv;
    pp.acc_s  = mj_rij3 * dr;
    pp.jerk_s = mj_rij3 * (dv - 3.*alpha * dr);
#ifdef INTEGRATE_6TH_SUN
    pp.snap_s = mj_rij3 * (da - 6.*alpha * dv - 3.*beta * dr);
#endif
}
/* Same as calcStarGravity but evaluated at the predicted phase-space
   point (xp, vp, ap); the potential is intentionally skipped. */
template <class Tpsys>
void calcStarGravity_p(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.xp;
    PS::F64vec veli = pp.vp;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec acci = pp.ap;
#endif
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec da = - acci;
#endif
    PS::F64 r2inv = 1. / (dr * dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    //PS::F64 r5inv = r3inv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
#ifdef INTEGRATE_6TH_SUN
    PS::F64 beta = (dv*dv + dr*da) * r2inv - 5. * alpha*alpha;
#endif
    //pp.phi_s = -m_sun * rinv;
    pp.acc_s  = mj_rij3 * dr;
    pp.jerk_s = mj_rij3 * (dv - 3.*alpha * dr);
#ifdef INTEGRATE_6TH_SUN
    pp.snap_s = mj_rij3 * (da - 6.*alpha * dv - 3.*beta * dr);
#endif
}
/* Corrector-stage variant: evaluated at the corrected pos/vel (pp.pos,
   pp.vel); the potential is intentionally skipped. */
template <class Tpsys>
void calcStarGravity_c(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec veli = pp.vel;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec acci = pp.acc_;
#endif
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
#ifdef INTEGRATE_6TH_SUN
    PS::F64vec da = - acci;
#endif
    PS::F64 r2inv = 1. / (dr * dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    //PS::F64 r5inv = r3inv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
#ifdef INTEGRATE_6TH_SUN
    PS::F64 beta = (dv*dv + dr*da) * r2inv - 5. * alpha*alpha;
#endif
    //pp.phi_s = -m_sun * rinv;
    pp.acc_s  = mj_rij3 * dr;
    pp.jerk_s = mj_rij3 * (dv - 3.*alpha * dr);
#ifdef INTEGRATE_6TH_SUN
    pp.snap_s = mj_rij3 * (da - 6.*alpha * dv - 3.*beta * dr);
#endif
}
/* Star potential, acceleration and jerk only (no snap). */
template <class Tpsys>
void calcStarAccJerk(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec veli = pp.vel;
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
    PS::F64 r2inv = 1. / (dr*dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
    pp.phi_s  = -m_sun * rinv;
    pp.acc_s  = mj_rij3 * dr;
    pp.jerk_s = mj_rij3 * (dv - 3.*alpha * dr);
}
#ifdef INTEGRATE_6TH_SUN
/* Star snap term only, for the 6th-order integrator. */
template <class Tpsys>
void calcStarSnap(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec veli = pp.vel;
    PS::F64vec acci = pp.acc_;
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
    PS::F64vec da = - acci;
    PS::F64 r2inv = 1. / (dr*dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
    PS::F64 beta = (dv*dv + dr*da) * r2inv - 5. * alpha*alpha;
    pp.snap_s = mj_rij3 * (da - 6.*alpha * dv - 3.*beta * dr);
}
#endif
#if 0
/* Star potential and acceleration only (currently compiled out). */
template <class Tpsys>
void calcStarAcc(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec dr = - posi;
    PS::F64 r2inv = 1.0 / (dr * dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    pp.phi_s = -m_sun * rinv;
    pp.acc_s = m_sun * r3inv * dr;
}
#endif
/* Star jerk term only. */
template <class Tpsys>
void calcStarJerk(Tpsys & pp)
{
    const PS::F64 eps2  = FP_t::eps2_sun;
    const PS::F64 m_sun = FP_t::m_sun;
    PS::F64vec posi = pp.pos;
    PS::F64vec veli = pp.vel;
    PS::F64vec dr = - posi;
    PS::F64vec dv = - veli;
    PS::F64 r2inv = 1.0 / (dr * dr + eps2);
    PS::F64 rinv  = sqrt(r2inv);
    PS::F64 r3inv = rinv * r2inv;
    //PS::F64 r5inv = r3inv * r2inv;
    PS::F64 mj_rij3 = m_sun * r3inv;
    PS::F64 alpha = (dr*dv) * r2inv;
    pp.jerk_s = mj_rij3 * (dv - 3.*alpha * dr);
}
/* Full hard-part gravity on particle pi: star contribution plus the
   softened, cutoff-weighted pairwise contribution from either pi's
   neighbour list or every member of pp (CALCFORCEFROMALLMEMBERS).
   Accumulates phi_d, acc_d and jerk_d from zero.  The cutoff functions
   W/K and dK/dt blend the hard (direct) part against the tree part. */
template <class Tp, class Tpsys>
void calcGravity(Tp & pi,
                 Tpsys & pp)
{
    const PS::F64 eps2 = FP_t::eps2;
    //#ifndef INTEGRATE_6TH_SUN
    calcStarGravity(pi);
    //#else
    //calcStarAccJerk(pi);
    //pi.setAcc_();
    //calcStarSnap(pi);
    //#endif
    pi.phi_d  = 0.;
    pi.acc_d  = 0.;
    pi.jerk_d = 0.;
    PS::S32 pj_id = 0;
    PS::F64vec xi = pi.pos;
    PS::F64vec vi = pi.vel;
#ifndef CALCFORCEFROMALLMEMBERS
    for(PS::S32 j=0; j<pi.neighbor; j++)
#else
    for(PS::S32 j=0; j<pp.size(); j++)
#endif
        {
#ifndef CALCFORCEFROMALLMEMBERS
            pj_id = pi.n_hard_list.at(j);
#else
            pj_id = j;
#endif
            if ( pi.id == pp[pj_id].id ) continue;   /* skip self-interaction */
            PS::F64vec xj = pp[pj_id].pos;
            PS::F64vec dr = xj - xi;
            PS::F64 dr2 = dr * dr;
            assert( dr2 != 0.0 );
            dr2 += eps2;
            PS::F64vec vj = pp[pj_id].vel;
            PS::F64vec dv = vj - vi;
            PS::F64 massj = pp[pj_id].mass;
            PS::F64 rij   = sqrt(dr2);
            PS::F64 rinv  = 1. / rij;
            PS::F64 r2inv = rinv * rinv;
            PS::F64 r3inv = r2inv * rinv;
            //PS::F64 r5inv = r3inv * r2inv;
#ifdef USE_INDIVIDUAL_CUTOFF
            PS::F64 r_out_inv = std::min(pi.r_out_inv, pp[pj_id].r_out_inv);
#else
            PS::F64 r_out_inv = FP_t::r_out_inv;
#endif
            PS::F64 mj_rij3 = massj * r3inv;
            PS::F64 alpha = (dr*dv) * r2inv;
            PS::F64 _W   = 1.-cutoff_W(rij, r_out_inv);
            PS::F64 _K   = 1.-cutoff_K(rij, r_out_inv);
            PS::F64 dKdt = cutoff_dKdt(rij, r_out_inv, alpha);
            PS::F64 alpha_c = alpha*_K;
            pi.phi_d  -= massj * rinv * _W;
            pi.acc_d  += mj_rij3 * _K * dr;
            pi.jerk_d += mj_rij3 * ( _K * dv - (3.*alpha_c + dKdt) * dr );
        }
}
/* Predictor-stage variant of calcGravity: uses predicted coordinates
   (xp, vp) and skips the potential; accumulates acc_d and jerk_d. */
template <class Tp, class Tpsys>
void calcGravity_p(Tp & pi,
                   Tpsys & pp)
{
    const PS::F64 eps2 = FP_t::eps2;
    calcStarGravity_p(pi);
    //pi.phi_d = 0.;
    pi.acc_d  = 0.;
    pi.jerk_d = 0.;
    PS::S32 pj_id = 0;
    PS::F64vec xpi = pi.xp;
    PS::F64vec vpi = pi.vp;
#ifndef CALCFORCEFROMALLMEMBERS
    for(PS::S32 j=0; j<pi.neighbor; j++)
#else
    for(PS::S32 j=0; j<pp.size(); j++)
#endif
        {
#ifndef CALCFORCEFROMALLMEMBERS
            pj_id = pi.n_hard_list.at(j);
#else
            pj_id = j;
#endif
            if ( pi.id == pp[pj_id].id ) continue;   /* skip self-interaction */
            PS::F64vec xpj = pp[pj_id].xp;
            PS::F64vec dr  = xpj - xpi;
            PS::F64 dr2 = dr * dr;
            assert( dr2 != 0.0 );
            dr2 += eps2;
            PS::F64vec vpj = pp[pj_id].vp;
            PS::F64vec dv  = vpj - vpi;
            PS::F64 massj = pp[pj_id].mass;
            PS::F64 rij   = sqrt(dr2);
            PS::F64 rinv  = 1. / rij;
            PS::F64 r2inv = rinv * rinv;
            PS::F64 r3inv = r2inv * rinv;
            //PS::F64 r5inv = r3inv * r2inv;
#ifdef USE_INDIVIDUAL_CUTOFF
            PS::F64 r_out_inv = std::min(pi.r_out_inv, pp[pj_id].r_out_inv);
#else
            PS::F64 r_out_inv = FP_t::r_out_inv;
#endif
            PS::F64 mj_rij3 = massj * r3inv;
            PS::F64 alpha = (dr*dv) * r2inv;
            //PS::F64 _W = 1.-cutoff_W(rij, r_out_inv);
            PS::F64 _K   = 1.-cutoff_K(rij, r_out_inv);
            PS::F64 dKdt = cutoff_dKdt(rij, r_out_inv, alpha);
            PS::F64 alpha_c = alpha*_K;
            //pi.phi_d -= massj * rinv * _W;
            pi.acc_d  += mj_rij3 * _K * dr;
            pi.jerk_d += mj_rij3 * ( _K * dv - (3.*alpha_c + dKdt) * dr );
        }
}
/* Corrector-stage variant: pi uses corrected pos/vel while the partners
   still use predicted xp/vp.  The pairwise part is only recomputed when
   CORRECT_INTERACTION_GRAVITY is defined; otherwise just the star term. */
template <class Tp, class Tpsys>
void calcGravity_c(Tp & pi,
                   Tpsys & pp)
{
    //assert( pi.neighbor != 0 );
    const PS::F64 eps2 = FP_t::eps2;
    calcStarGravity_c(pi);
#ifdef CORRECT_INTERACTION_GRAVITY
    pi.acc_d  = 0.;
    pi.jerk_d = 0.;
    PS::S32 pj_id = 0;
    PS::F64vec xi = pi.pos;
    PS::F64vec vi = pi.vel;
#ifndef CALCFORCEFROMALLMEMBERS
    for(PS::S32 j=0; j<pi.neighbor; j++)
#else
    for(PS::S32 j=0; j<pp.size(); j++)
#endif
        {
#ifndef CALCFORCEFROMALLMEMBERS
            pj_id = pi.n_hard_list.at(j);
#else
            pj_id = j;
#endif
            if ( pi.id == pp[pj_id].id ) continue;   /* skip self-interaction */
            PS::F64vec xpj = pp[pj_id].xp;
            PS::F64vec dr  = xpj - xi;
            PS::F64 dr2 = dr * dr;
            assert( dr2 != 0.0 );
            dr2 += eps2;
            PS::F64vec vpj = pp[pj_id].vp;
            PS::F64vec dv  = vpj - vi;
            PS::F64 massj = pp[pj_id].mass;
            PS::F64 rij   = sqrt(dr2);
            PS::F64 rinv  = 1. / rij;
            PS::F64 r2inv = rinv * rinv;
            PS::F64 r3inv = r2inv * rinv;
            //PS::F64 r5inv = r3inv * r2inv;
#ifdef USE_INDIVIDUAL_CUTOFF
            PS::F64 r_out_inv = std::min(pi.r_out_inv, pp[pj_id].r_out_inv);
#else
            PS::F64 r_out_inv = FP_t::r_out_inv;
#endif
            PS::F64 mj_rij3 = massj * r3inv;
            PS::F64 alpha = (dr*dv) * r2inv;
            //PS::F64 _W = 1.-cutoff_W(rij, r_out_inv);
            PS::F64 _K   = 1.-cutoff_K(rij, r_out_inv);
            PS::F64 dKdt = cutoff_dKdt(rij, r_out_inv, alpha);
            PS::F64 alpha_c = alpha*_K;
            //pi.phi_d -= massj * rinv * _W;
            pi.acc_d  += mj_rij3 * _K * dr;
            pi.jerk_d += mj_rij3 * ( _K * dv - (3.*alpha_c + dKdt) * dr );
        }
#endif
}
/* Recompute the jerk contribution for particle pi (star + pairwise).
   NOTE(review): this function zeroes pi.jerk but then accumulates into
   pi.jerk_d, which is never reset here — the sibling calcGravity resets
   and accumulates the same field.  Confirm whether `pi.jerk_d = 0.;`
   was intended. */
template <class Tp, class Tpsys>
void calcJerk(Tp & pi,
              Tpsys & pp)
{
    const PS::F64 eps2 = FP_t::eps2;
    calcStarJerk(pi);
    pi.jerk = 0.;
    PS::S32 pj_id = 0;
    PS::F64vec xi = pi.pos;
    PS::F64vec vi = pi.vel;
#ifndef CALCFORCEFROMALLMEMBERS
    for(PS::S32 j=0; j<pi.neighbor; j++)
#else
    for(PS::S32 j=0; j<pp.size(); j++)
#endif
        {
#ifndef CALCFORCEFROMALLMEMBERS
            pj_id = pi.n_hard_list.at(j);
#else
            pj_id = j;
#endif
            if ( pi.id == pp[pj_id].id ) continue;   /* skip self-interaction */
            PS::F64vec xj = pp[pj_id].pos;
            PS::F64vec dr = xj - xi;
            PS::F64 dr2 = dr * dr;
            assert( dr2 != 0.0 );
            dr2 += eps2;
            PS::F64vec vj = pp[pj_id].vel;
            PS::F64vec dv = vj - vi;
            PS::F64 massj = pp[pj_id].mass;
            PS::F64 rij   = sqrt(dr2);
            PS::F64 rinv  = 1. / rij;
            PS::F64 r2inv = rinv * rinv;
            PS::F64 r3inv = r2inv * rinv;
            //PS::F64 r5inv = r3inv * r2inv;
#ifdef USE_INDIVIDUAL_CUTOFF
            PS::F64 r_out_inv = std::min(pi.r_out_inv, pp[pj_id].r_out_inv);
#else
            PS::F64 r_out_inv = FP_t::r_out_inv;
#endif
            PS::F64 mj_rij3 = massj * r3inv;
            PS::F64 alpha = (dr*dv) * r2inv;
            //PS::F64 _W = 1.-cutoff_W(rij, r_out_inv);
            PS::F64 _K   = 1.-cutoff_K(rij, r_out_inv);
            PS::F64 dKdt = cutoff_dKdt(rij, r_out_inv, alpha);
            PS::F64 alpha_c = alpha*_K;
            pi.jerk_d += mj_rij3 * ( _K * dv - (3.*alpha_c + dKdt) * dr );
        }
}
/* Recompute potential, acceleration and jerk for pi from the star plus
   all neighbours (or all members), accumulating phi_d/acc_d/jerk_d
   from zero.  Same cutoff blending as calcGravity. */
template <class Tp, class Tpsys>
void calcAccJerk(Tp & pi,
                 Tpsys & pp)
{
    //assert( pi.neighbor != 0 );
    const PS::F64 eps2 = FP_t::eps2;
    calcStarAccJerk(pi);
    pi.acc_d  = 0.;
    pi.jerk_d = 0.;
    pi.phi_d  = 0.;
    PS::S32 pj_id = 0;
    PS::F64vec xi = pi.pos;
    PS::F64vec vi = pi.vel;
#ifndef CALCFORCEFROMALLMEMBERS
    for(PS::S32 j=0; j<pi.neighbor; j++)
#else
    for(PS::S32 j=0; j<pp.size(); j++)
#endif
        {
#ifndef CALCFORCEFROMALLMEMBERS
            pj_id = pi.n_hard_list.at(j);
#else
            pj_id = j;
#endif
            if ( pi.id == pp[pj_id].id ) continue;   /* skip self-interaction */
            PS::F64vec xj = pp[pj_id].pos;
            PS::F64vec dr = xj - xi;
            PS::F64 dr2 = dr * dr;
            assert( dr2 != 0.0 );
            dr2 += eps2;
            PS::F64vec vj = pp[pj_id].vel;
            PS::F64vec dv = vj - vi;
            PS::F64 massj = pp[pj_id].mass;
            PS::F64 rij   = sqrt(dr2);
            PS::F64 rinv  = 1. / rij;
            PS::F64 r2inv = rinv * rinv;
            PS::F64 r3inv = r2inv * rinv;
            //PS::F64 r5inv = r3inv * r2inv;
#ifdef USE_INDIVIDUAL_CUTOFF
            PS::F64 r_out_inv = std::min(pi.r_out_inv, pp[pj_id].r_out_inv);
#else
            PS::F64 r_out_inv = FP_t::r_out_inv;
#endif
            PS::F64 mj_rij3 = massj * r3inv;
            PS::F64 alpha = (dr*dv) * r2inv;
            PS::F64 _W   = 1.-cutoff_W(rij, r_out_inv);
            PS::F64 _K   = 1.-cutoff_K(rij, r_out_inv);
            PS::F64 dKdt = cutoff_dKdt(rij, r_out_inv, alpha);
            PS::F64 alpha_c = alpha*_K;
            pi.phi_d  -= massj * rinv * _W;
            pi.acc_d  += mj_rij3 * _K * dr;
            pi.jerk_d += mj_rij3 * ( _K * dv - (3.*alpha_c + dKdt) * dr );
        }
}
|
Act3A01283525.c | //Ian De La Garza González A01283525
//Este programa calcula el integral de la funcion de f(x)=3*sin(2x)+4
//De 1 a 4
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include <stdlib.h>
/* Integrand: f(x) = 3*sin(2x) + 4 */
double function(double x) {
    double value = 4.0 + 3.0 * sin(2.0 * x);
    return value;
}
/* Numerically integrate f over [l, r] with a right-endpoint Riemann sum
 * of `nsteps` rectangles, parallelised with OpenMP.
 * Usage: ./prog <lower> <upper>
 *
 * Fixes vs. the original:
 *  - shared(h) referenced a variable that was never declared (compile error);
 *  - private(partial_Sum) gave each thread an UNINITIALIZED accumulator;
 *  - `total_Sum = step*total_Sum;` inside the critical section rescaled the
 *    running total once per thread instead of once overall.
 * A reduction expresses the intended sum correctly and more simply. */
int main(int argc, char* argv[]){
    if (argc < 3){
        fprintf(stderr, "usage: %s <lower> <upper>\n", argv[0]);
        return 1;
    }
    //limites
    double l = atof(argv[1]), r = atof(argv[2]);
    //pasos
    int nsteps = 1000000;
    double step = (r-l)/nsteps;
    double total_Sum = 0;
    #pragma omp parallel for reduction(+:total_Sum)
    for(int i = 1; i <= nsteps; i++){
        total_Sum += function(l + i*step);
    }
    total_Sum *= step;   /* scale exactly once, after all threads joined */
    printf("Total Sum: %f\n", total_Sum);
    return 0;
}
|
simde-sse2.h | /* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin x86/sse2.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
* 2017 Hasindu Gamaarachchi <hasindu@unsw.edu.au>
* 2018 Jeff Daily <jeff.daily@amd.com>
*/
#if !defined(SIMDE_X86_SSE2_H)
#define SIMDE_X86_SSE2_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin x86/sse.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin x86/mmx.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_X86_MMX_H)
#define SIMDE_X86_MMX_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-common.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
#if !defined(SIMDE_COMMON_H)
#define SIMDE_COMMON_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin hedley.h :: */
/* Hedley - https://nemequ.github.io/hedley
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to
* the public domain worldwide. This software is distributed without
* any warranty.
*
* For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
* SPDX-License-Identifier: CC0-1.0
*/
#if !defined(HEDLEY_VERSION) || (HEDLEY_VERSION < 16)
#if defined(HEDLEY_VERSION)
# undef HEDLEY_VERSION
#endif
#define HEDLEY_VERSION 16
#if defined(HEDLEY_STRINGIFY_EX)
# undef HEDLEY_STRINGIFY_EX
#endif
#define HEDLEY_STRINGIFY_EX(x) #x
#if defined(HEDLEY_STRINGIFY)
# undef HEDLEY_STRINGIFY
#endif
#define HEDLEY_STRINGIFY(x) HEDLEY_STRINGIFY_EX(x)
#if defined(HEDLEY_CONCAT_EX)
# undef HEDLEY_CONCAT_EX
#endif
#define HEDLEY_CONCAT_EX(a,b) a##b
#if defined(HEDLEY_CONCAT)
# undef HEDLEY_CONCAT
#endif
#define HEDLEY_CONCAT(a,b) HEDLEY_CONCAT_EX(a,b)
#if defined(HEDLEY_CONCAT3_EX)
# undef HEDLEY_CONCAT3_EX
#endif
#define HEDLEY_CONCAT3_EX(a,b,c) a##b##c
#if defined(HEDLEY_CONCAT3)
# undef HEDLEY_CONCAT3
#endif
#define HEDLEY_CONCAT3(a,b,c) HEDLEY_CONCAT3_EX(a,b,c)
#if defined(HEDLEY_VERSION_ENCODE)
# undef HEDLEY_VERSION_ENCODE
#endif
#define HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
#if defined(HEDLEY_VERSION_DECODE_MAJOR)
# undef HEDLEY_VERSION_DECODE_MAJOR
#endif
#define HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
#if defined(HEDLEY_VERSION_DECODE_MINOR)
# undef HEDLEY_VERSION_DECODE_MINOR
#endif
#define HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
#if defined(HEDLEY_VERSION_DECODE_REVISION)
# undef HEDLEY_VERSION_DECODE_REVISION
#endif
#define HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
#if defined(HEDLEY_GNUC_VERSION)
# undef HEDLEY_GNUC_VERSION
#endif
#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
# define HEDLEY_GNUC_VERSION HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
#elif defined(__GNUC__)
# define HEDLEY_GNUC_VERSION HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
#endif
#if defined(HEDLEY_GNUC_VERSION_CHECK)
# undef HEDLEY_GNUC_VERSION_CHECK
#endif
#if defined(HEDLEY_GNUC_VERSION)
# define HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (HEDLEY_GNUC_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_MSVC_VERSION)
# undef HEDLEY_MSVC_VERSION
#endif
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL)
# define HEDLEY_MSVC_VERSION HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100)
#elif defined(_MSC_FULL_VER) && !defined(__ICL)
# define HEDLEY_MSVC_VERSION HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
#elif defined(_MSC_VER) && !defined(__ICL)
# define HEDLEY_MSVC_VERSION HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
#endif
#if defined(HEDLEY_MSVC_VERSION_CHECK)
# undef HEDLEY_MSVC_VERSION_CHECK
#endif
#if !defined(HEDLEY_MSVC_VERSION)
# define HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0)
#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
# define HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
#elif defined(_MSC_VER) && (_MSC_VER >= 1200)
# define HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
#else
# define HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor)))
#endif
#if defined(HEDLEY_INTEL_VERSION)
# undef HEDLEY_INTEL_VERSION
#endif
#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL)
# define HEDLEY_INTEL_VERSION HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
#elif defined(__INTEL_COMPILER) && !defined(__ICL)
# define HEDLEY_INTEL_VERSION HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
#endif
#if defined(HEDLEY_INTEL_VERSION_CHECK)
# undef HEDLEY_INTEL_VERSION_CHECK
#endif
#if defined(HEDLEY_INTEL_VERSION)
# define HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (HEDLEY_INTEL_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_INTEL_CL_VERSION)
# undef HEDLEY_INTEL_CL_VERSION
#endif
#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL)
# define HEDLEY_INTEL_CL_VERSION HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0)
#endif
#if defined(HEDLEY_INTEL_CL_VERSION_CHECK)
# undef HEDLEY_INTEL_CL_VERSION_CHECK
#endif
#if defined(HEDLEY_INTEL_CL_VERSION)
# define HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (HEDLEY_INTEL_CL_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_PGI_VERSION)
# undef HEDLEY_PGI_VERSION
#endif
#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__)
# define HEDLEY_PGI_VERSION HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
#endif
#if defined(HEDLEY_PGI_VERSION_CHECK)
# undef HEDLEY_PGI_VERSION_CHECK
#endif
#if defined(HEDLEY_PGI_VERSION)
# define HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (HEDLEY_PGI_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_SUNPRO_VERSION)
# undef HEDLEY_SUNPRO_VERSION
#endif
#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000)
# define HEDLEY_SUNPRO_VERSION HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
#elif defined(__SUNPRO_C)
# define HEDLEY_SUNPRO_VERSION HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf)
#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000)
# define HEDLEY_SUNPRO_VERSION HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10)
#elif defined(__SUNPRO_CC)
# define HEDLEY_SUNPRO_VERSION HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf)
#endif
#if defined(HEDLEY_SUNPRO_VERSION_CHECK)
# undef HEDLEY_SUNPRO_VERSION_CHECK
#endif
#if defined(HEDLEY_SUNPRO_VERSION)
# define HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (HEDLEY_SUNPRO_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_EMSCRIPTEN_VERSION)
# undef HEDLEY_EMSCRIPTEN_VERSION
#endif
#if defined(__EMSCRIPTEN__)
# define HEDLEY_EMSCRIPTEN_VERSION HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
#endif
#if defined(HEDLEY_EMSCRIPTEN_VERSION_CHECK)
# undef HEDLEY_EMSCRIPTEN_VERSION_CHECK
#endif
#if defined(HEDLEY_EMSCRIPTEN_VERSION)
# define HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (HEDLEY_EMSCRIPTEN_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_ARM_VERSION)
# undef HEDLEY_ARM_VERSION
#endif
#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION)
# define HEDLEY_ARM_VERSION HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100)
#elif defined(__CC_ARM) && defined(__ARMCC_VERSION)
# define HEDLEY_ARM_VERSION HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100)
#endif
#if defined(HEDLEY_ARM_VERSION_CHECK)
# undef HEDLEY_ARM_VERSION_CHECK
#endif
#if defined(HEDLEY_ARM_VERSION)
# define HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (HEDLEY_ARM_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_IBM_VERSION)
# undef HEDLEY_IBM_VERSION
#endif
#if defined(__ibmxl__)
# define HEDLEY_IBM_VERSION HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
#elif defined(__xlC__) && defined(__xlC_ver__)
# define HEDLEY_IBM_VERSION HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
#elif defined(__xlC__)
# define HEDLEY_IBM_VERSION HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
#endif
#if defined(HEDLEY_IBM_VERSION_CHECK)
# undef HEDLEY_IBM_VERSION_CHECK
#endif
#if defined(HEDLEY_IBM_VERSION)
# define HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (HEDLEY_IBM_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_VERSION)
# undef HEDLEY_TI_VERSION
#endif
#if \
defined(__TI_COMPILER_VERSION__) && \
( \
defined(__TMS470__) || defined(__TI_ARM__) || \
defined(__MSP430__) || \
defined(__TMS320C2000__) \
)
# if (__TI_COMPILER_VERSION__ >= 16000000)
# define HEDLEY_TI_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
# endif
#endif
#if defined(HEDLEY_TI_VERSION_CHECK)
# undef HEDLEY_TI_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_VERSION)
# define HEDLEY_TI_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_CL2000_VERSION)
# undef HEDLEY_TI_CL2000_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__)
# define HEDLEY_TI_CL2000_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_CL2000_VERSION_CHECK)
# undef HEDLEY_TI_CL2000_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_CL2000_VERSION)
# define HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_CL2000_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_CL430_VERSION)
# undef HEDLEY_TI_CL430_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__)
# define HEDLEY_TI_CL430_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_CL430_VERSION_CHECK)
# undef HEDLEY_TI_CL430_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_CL430_VERSION)
# define HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_CL430_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_ARMCL_VERSION)
# undef HEDLEY_TI_ARMCL_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__))
# define HEDLEY_TI_ARMCL_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_ARMCL_VERSION_CHECK)
# undef HEDLEY_TI_ARMCL_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_ARMCL_VERSION)
# define HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_ARMCL_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_CL6X_VERSION)
# undef HEDLEY_TI_CL6X_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__)
# define HEDLEY_TI_CL6X_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_CL6X_VERSION_CHECK)
# undef HEDLEY_TI_CL6X_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_CL6X_VERSION)
# define HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_CL6X_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_CL7X_VERSION)
# undef HEDLEY_TI_CL7X_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__)
# define HEDLEY_TI_CL7X_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_CL7X_VERSION_CHECK)
# undef HEDLEY_TI_CL7X_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_CL7X_VERSION)
# define HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_CL7X_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TI_CLPRU_VERSION)
# undef HEDLEY_TI_CLPRU_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__)
# define HEDLEY_TI_CLPRU_VERSION HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(HEDLEY_TI_CLPRU_VERSION_CHECK)
# undef HEDLEY_TI_CLPRU_VERSION_CHECK
#endif
#if defined(HEDLEY_TI_CLPRU_VERSION)
# define HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (HEDLEY_TI_CLPRU_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_CRAY_VERSION)
# undef HEDLEY_CRAY_VERSION
#endif
#if defined(_CRAYC)
# if defined(_RELEASE_PATCHLEVEL)
# define HEDLEY_CRAY_VERSION HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
# else
# define HEDLEY_CRAY_VERSION HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
# endif
#endif
#if defined(HEDLEY_CRAY_VERSION_CHECK)
# undef HEDLEY_CRAY_VERSION_CHECK
#endif
#if defined(HEDLEY_CRAY_VERSION)
# define HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (HEDLEY_CRAY_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_IAR_VERSION)
# undef HEDLEY_IAR_VERSION
#endif
#if defined(__IAR_SYSTEMS_ICC__)
# if __VER__ > 1000
# define HEDLEY_IAR_VERSION HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
# else
# define HEDLEY_IAR_VERSION HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0)
# endif
#endif
#if defined(HEDLEY_IAR_VERSION_CHECK)
# undef HEDLEY_IAR_VERSION_CHECK
#endif
#if defined(HEDLEY_IAR_VERSION)
# define HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (HEDLEY_IAR_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_TINYC_VERSION)
# undef HEDLEY_TINYC_VERSION
#endif
#if defined(__TINYC__)
# define HEDLEY_TINYC_VERSION HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
#endif
#if defined(HEDLEY_TINYC_VERSION_CHECK)
# undef HEDLEY_TINYC_VERSION_CHECK
#endif
#if defined(HEDLEY_TINYC_VERSION)
# define HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (HEDLEY_TINYC_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_DMC_VERSION)
# undef HEDLEY_DMC_VERSION
#endif
#if defined(__DMC__)
# define HEDLEY_DMC_VERSION HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
#endif
#if defined(HEDLEY_DMC_VERSION_CHECK)
# undef HEDLEY_DMC_VERSION_CHECK
#endif
#if defined(HEDLEY_DMC_VERSION)
# define HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (HEDLEY_DMC_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_COMPCERT_VERSION)
# undef HEDLEY_COMPCERT_VERSION
#endif
#if defined(__COMPCERT_VERSION__)
# define HEDLEY_COMPCERT_VERSION HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100)
#endif
#if defined(HEDLEY_COMPCERT_VERSION_CHECK)
# undef HEDLEY_COMPCERT_VERSION_CHECK
#endif
#if defined(HEDLEY_COMPCERT_VERSION)
# define HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (HEDLEY_COMPCERT_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_PELLES_VERSION)
# undef HEDLEY_PELLES_VERSION
#endif
#if defined(__POCC__)
# define HEDLEY_PELLES_VERSION HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
#endif
#if defined(HEDLEY_PELLES_VERSION_CHECK)
# undef HEDLEY_PELLES_VERSION_CHECK
#endif
#if defined(HEDLEY_PELLES_VERSION)
# define HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (HEDLEY_PELLES_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_MCST_LCC_VERSION)
# undef HEDLEY_MCST_LCC_VERSION
#endif
#if defined(__LCC__) && defined(__LCC_MINOR__)
# define HEDLEY_MCST_LCC_VERSION HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__)
#endif
#if defined(HEDLEY_MCST_LCC_VERSION_CHECK)
# undef HEDLEY_MCST_LCC_VERSION_CHECK
#endif
#if defined(HEDLEY_MCST_LCC_VERSION)
# define HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (HEDLEY_MCST_LCC_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_GCC_VERSION)
# undef HEDLEY_GCC_VERSION
#endif
#if \
defined(HEDLEY_GNUC_VERSION) && \
!defined(__clang__) && \
!defined(HEDLEY_INTEL_VERSION) && \
!defined(HEDLEY_PGI_VERSION) && \
!defined(HEDLEY_ARM_VERSION) && \
!defined(HEDLEY_CRAY_VERSION) && \
!defined(HEDLEY_TI_VERSION) && \
!defined(HEDLEY_TI_ARMCL_VERSION) && \
!defined(HEDLEY_TI_CL430_VERSION) && \
!defined(HEDLEY_TI_CL2000_VERSION) && \
!defined(HEDLEY_TI_CL6X_VERSION) && \
!defined(HEDLEY_TI_CL7X_VERSION) && \
!defined(HEDLEY_TI_CLPRU_VERSION) && \
!defined(__COMPCERT__) && \
!defined(HEDLEY_MCST_LCC_VERSION)
# define HEDLEY_GCC_VERSION HEDLEY_GNUC_VERSION
#endif
#if defined(HEDLEY_GCC_VERSION_CHECK)
# undef HEDLEY_GCC_VERSION_CHECK
#endif
#if defined(HEDLEY_GCC_VERSION)
# define HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (HEDLEY_GCC_VERSION >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
# define HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(HEDLEY_HAS_ATTRIBUTE)
# undef HEDLEY_HAS_ATTRIBUTE
#endif
#if \
defined(__has_attribute) && \
( \
(!defined(HEDLEY_IAR_VERSION) || HEDLEY_IAR_VERSION_CHECK(8,5,9)) \
)
# define HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
#else
# define HEDLEY_HAS_ATTRIBUTE(attribute) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_ATTRIBUTE)
# undef HEDLEY_GNUC_HAS_ATTRIBUTE
#endif
#if defined(__has_attribute)
# define HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_HAS_ATTRIBUTE(attribute)
#else
# define HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_ATTRIBUTE)
# undef HEDLEY_GCC_HAS_ATTRIBUTE
#endif
#if defined(__has_attribute)
# define HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_HAS_ATTRIBUTE(attribute)
#else
# define HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_CPP_ATTRIBUTE)
# undef HEDLEY_HAS_CPP_ATTRIBUTE
#endif
#if \
defined(__has_cpp_attribute) && \
defined(__cplusplus) && \
(!defined(HEDLEY_SUNPRO_VERSION) || HEDLEY_SUNPRO_VERSION_CHECK(5,15,0))
# define HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
#else
# define HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
#endif
#if defined(HEDLEY_HAS_CPP_ATTRIBUTE_NS)
# undef HEDLEY_HAS_CPP_ATTRIBUTE_NS
#endif
#if !defined(__cplusplus) || !defined(__has_cpp_attribute)
# define HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
#elif \
!defined(HEDLEY_PGI_VERSION) && \
!defined(HEDLEY_IAR_VERSION) && \
(!defined(HEDLEY_SUNPRO_VERSION) || HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \
(!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,20,0))
# define HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute)
#else
# define HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_CPP_ATTRIBUTE)
# undef HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
#endif
#if defined(__has_cpp_attribute) && defined(__cplusplus)
# define HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
#else
# define HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_CPP_ATTRIBUTE)
# undef HEDLEY_GCC_HAS_CPP_ATTRIBUTE
#endif
#if defined(__has_cpp_attribute) && defined(__cplusplus)
# define HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
#else
# define HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_BUILTIN)
# undef HEDLEY_HAS_BUILTIN
#endif
#if defined(__has_builtin)
# define HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin)
#else
# define HEDLEY_HAS_BUILTIN(builtin) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_BUILTIN)
# undef HEDLEY_GNUC_HAS_BUILTIN
#endif
#if defined(__has_builtin)
# define HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
#else
# define HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_BUILTIN)
# undef HEDLEY_GCC_HAS_BUILTIN
#endif
#if defined(__has_builtin)
# define HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
#else
# define HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_FEATURE)
# undef HEDLEY_HAS_FEATURE
#endif
#if defined(__has_feature)
# define HEDLEY_HAS_FEATURE(feature) __has_feature(feature)
#else
# define HEDLEY_HAS_FEATURE(feature) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_FEATURE)
# undef HEDLEY_GNUC_HAS_FEATURE
#endif
#if defined(__has_feature)
# define HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
#else
# define HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_FEATURE)
# undef HEDLEY_GCC_HAS_FEATURE
#endif
#if defined(__has_feature)
# define HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
#else
# define HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_EXTENSION)
# undef HEDLEY_HAS_EXTENSION
#endif
#if defined(__has_extension)
# define HEDLEY_HAS_EXTENSION(extension) __has_extension(extension)
#else
# define HEDLEY_HAS_EXTENSION(extension) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_EXTENSION)
# undef HEDLEY_GNUC_HAS_EXTENSION
#endif
#if defined(__has_extension)
# define HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
#else
# define HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_EXTENSION)
# undef HEDLEY_GCC_HAS_EXTENSION
#endif
#if defined(__has_extension)
# define HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
#else
# define HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_DECLSPEC_ATTRIBUTE)
# undef HEDLEY_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
# define HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute)
#else
# define HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE)
# undef HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
# define HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
#else
# define HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE)
# undef HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
# define HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
#else
# define HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_HAS_WARNING)
# undef HEDLEY_HAS_WARNING
#endif
#if defined(__has_warning)
# define HEDLEY_HAS_WARNING(warning) __has_warning(warning)
#else
# define HEDLEY_HAS_WARNING(warning) (0)
#endif
#if defined(HEDLEY_GNUC_HAS_WARNING)
# undef HEDLEY_GNUC_HAS_WARNING
#endif
#if defined(__has_warning)
# define HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
#else
# define HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_GCC_HAS_WARNING)
# undef HEDLEY_GCC_HAS_WARNING
#endif
#if defined(__has_warning)
# define HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
#else
# define HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
defined(__clang__) || \
HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \
HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \
(HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR))
# define HEDLEY_PRAGMA(value) _Pragma(#value)
#elif HEDLEY_MSVC_VERSION_CHECK(15,0,0)
# define HEDLEY_PRAGMA(value) __pragma(value)
#else
# define HEDLEY_PRAGMA(value)
#endif
/* HEDLEY_DIAGNOSTIC_PUSH / HEDLEY_DIAGNOSTIC_POP: save and restore the
 * compiler's diagnostic (warning) state, selecting the vendor-specific
 * pragma per compiler; both expand to nothing on unsupported compilers. */
#if defined(HEDLEY_DIAGNOSTIC_PUSH)
# undef HEDLEY_DIAGNOSTIC_PUSH
#endif
#if defined(HEDLEY_DIAGNOSTIC_POP)
# undef HEDLEY_DIAGNOSTIC_POP
#endif
#if defined(__clang__)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
#elif HEDLEY_GCC_VERSION_CHECK(4,6,0)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop")
#elif \
HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push))
# define HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop))
#elif HEDLEY_ARM_VERSION_CHECK(5,6,0)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("push")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("pop")
#elif \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
#elif HEDLEY_PELLES_VERSION_CHECK(2,90,0)
# define HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
# define HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
#else
# define HEDLEY_DIAGNOSTIC_PUSH
# define HEDLEY_DIAGNOSTIC_POP
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
/* Wraps an expression/attribute so clang's -Wc++98-compat (and, where the
 * compiler knows them, -Wc++17-extensions / -Wc++1z-extensions) warnings
 * are suppressed just for that token; identity otherwise. */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
# undef HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
#endif
#if defined(__cplusplus)
# if HEDLEY_HAS_WARNING("-Wc++98-compat")
# if HEDLEY_HAS_WARNING("-Wc++17-extensions")
# if HEDLEY_HAS_WARNING("-Wc++1z-extensions")
# define HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
_Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
_Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \
xpr \
HEDLEY_DIAGNOSTIC_POP
# else
# define HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
_Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
xpr \
HEDLEY_DIAGNOSTIC_POP
# endif
# else
# define HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
xpr \
HEDLEY_DIAGNOSTIC_POP
# endif
# endif
#endif
/* Fallback: pass the argument through untouched. */
#if !defined(HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
# define HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
#endif
/* HEDLEY_CONST_CAST(T, expr): remove const/volatile qualifiers.  Uses C++
 * const_cast where available; in C, suppresses the cast-qual warning around
 * a plain cast when the compiler supports the statement-expression form. */
#if defined(HEDLEY_CONST_CAST)
# undef HEDLEY_CONST_CAST
#endif
#if defined(__cplusplus)
# define HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
#elif \
HEDLEY_HAS_WARNING("-Wcast-qual") || \
HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
((T) (expr)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
# define HEDLEY_CONST_CAST(T, expr) ((T) (expr))
#endif
/* HEDLEY_REINTERPRET_CAST(T, expr): reinterpret_cast in C++, plain cast in C. */
#if defined(HEDLEY_REINTERPRET_CAST)
# undef HEDLEY_REINTERPRET_CAST
#endif
#if defined(__cplusplus)
# define HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
#else
# define HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
#endif
/* HEDLEY_STATIC_CAST(T, expr): static_cast in C++, plain cast in C. */
#if defined(HEDLEY_STATIC_CAST)
# undef HEDLEY_STATIC_CAST
#endif
#if defined(__cplusplus)
# define HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
#else
# define HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
#endif
/* HEDLEY_CPP_CAST(T, expr): apply a C-style cast in C++ without triggering
 * old-style-cast diagnostics; in C it is a no-op (the expression is returned
 * unchanged, since C callers are expected to cast explicitly if needed). */
#if defined(HEDLEY_CPP_CAST)
# undef HEDLEY_CPP_CAST
#endif
#if defined(__cplusplus)
# if HEDLEY_HAS_WARNING("-Wold-style-cast")
# define HEDLEY_CPP_CAST(T, expr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
((T) (expr)) \
HEDLEY_DIAGNOSTIC_POP
# elif HEDLEY_IAR_VERSION_CHECK(8,3,0)
/* BUG FIX: this branch previously omitted ((T) (expr)) entirely, so the
 * macro discarded its arguments and could not expand to an expression.
 * The cast is now emitted between the diagnostic suppression and POP,
 * matching the clang branch above. */
# define HEDLEY_CPP_CAST(T, expr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("diag_suppress=Pe137") \
((T) (expr)) \
HEDLEY_DIAGNOSTIC_POP
# else
# define HEDLEY_CPP_CAST(T, expr) ((T) (expr))
# endif
#else
# define HEDLEY_CPP_CAST(T, expr) (expr)
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED: suppress "deprecated" diagnostics
 * for the current diagnostic scope (intended for use between
 * HEDLEY_DIAGNOSTIC_PUSH and HEDLEY_DIAGNOSTIC_POP); no-op if unsupported. */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
# undef HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
#endif
#if HEDLEY_HAS_WARNING("-Wdeprecated-declarations")
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)")
#elif HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786))
#elif HEDLEY_PGI_VERSION_CHECK(20,7,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445")
#elif HEDLEY_PGI_VERSION_CHECK(17,10,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#elif HEDLEY_MSVC_VERSION_CHECK(15,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996))
#elif HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
#elif \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)")
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215")
#elif HEDLEY_PELLES_VERSION_CHECK(2,90,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)")
#else
# define HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS: suppress "unknown/ignored
 * pragma" diagnostics for the current diagnostic scope (intended for use
 * between HEDLEY_DIAGNOSTIC_PUSH and HEDLEY_DIAGNOSTIC_POP). */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS)
# undef HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
#endif
#if HEDLEY_HAS_WARNING("-Wunknown-pragmas")
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)")
#elif HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161))
#elif HEDLEY_PGI_VERSION_CHECK(17,10,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"")
#elif HEDLEY_MSVC_VERSION_CHECK(15,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068))
#elif \
HEDLEY_TI_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
/* Note: a duplicate "#elif HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0)" branch was
 * removed here; it was unreachable because the condition above already
 * tests HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) and defined the same pragma. */
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161")
#elif HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161")
#else
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES: suppress diagnostics
 * about unrecognized [[attributes]] in the current diagnostic scope. */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES)
# undef HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
#endif
#if HEDLEY_HAS_WARNING("-Wunknown-attributes")
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"")
/* NOTE(review): the GCC branch below disables -Wdeprecated-declarations,
 * not an unknown-attribute warning; this looks surprising for this macro's
 * purpose -- confirm against upstream Hedley before changing. */
#elif HEDLEY_GCC_VERSION_CHECK(4,6,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#elif HEDLEY_INTEL_VERSION_CHECK(17,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)")
#elif HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292))
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030))
#elif HEDLEY_PGI_VERSION_CHECK(20,7,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098")
#elif HEDLEY_PGI_VERSION_CHECK(17,10,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)")
#elif \
HEDLEY_TI_VERSION_CHECK(18,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173")
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097")
#elif HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
#else
# define HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL: suppress warnings about casts that
 * drop const/volatile qualifiers (used by HEDLEY_CONST_CAST). */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL)
# undef HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
#endif
#if HEDLEY_HAS_WARNING("-Wcast-qual")
# define HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)")
#elif HEDLEY_GCC_VERSION_CHECK(3,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#else
# define HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
#endif
/* HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION: suppress unused-function
 * warnings in the current diagnostic scope. */
#if defined(HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION)
# undef HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION
#endif
#if HEDLEY_HAS_WARNING("-Wunused-function")
# define HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_GCC_VERSION_CHECK(3,4,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_MSVC_VERSION_CHECK(1,0,0)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505))
#elif HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142")
#else
# define HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION
#endif
/* HEDLEY_DEPRECATED(since) / HEDLEY_DEPRECATED_FOR(since, replacement):
 * mark a declaration deprecated, with a version (and optional replacement)
 * embedded in the message where the compiler supports one. */
#if defined(HEDLEY_DEPRECATED)
# undef HEDLEY_DEPRECATED
#endif
#if defined(HEDLEY_DEPRECATED_FOR)
# undef HEDLEY_DEPRECATED_FOR
#endif
#if \
HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since))
# define HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement))
#elif \
(HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(HEDLEY_IAR_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \
HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
HEDLEY_TI_VERSION_CHECK(18,1,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since)))
# define HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement)))
#elif defined(__cplusplus) && (__cplusplus >= 201402L)
# define HEDLEY_DEPRECATED(since) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]])
# define HEDLEY_DEPRECATED_FOR(since, replacement) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]])
#elif \
HEDLEY_HAS_ATTRIBUTE(deprecated) || \
HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \
HEDLEY_IAR_VERSION_CHECK(8,10,0)
# define HEDLEY_DEPRECATED(since) __attribute__((__deprecated__))
# define HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__))
#elif \
HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_DEPRECATED(since) __declspec(deprecated)
# define HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated)
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_DEPRECATED(since) _Pragma("deprecated")
# define HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated")
#else
# define HEDLEY_DEPRECATED(since)
# define HEDLEY_DEPRECATED_FOR(since, replacement)
#endif
/* HEDLEY_UNAVAILABLE(available_since): warn if a not-yet-available
 * function is used (GCC-style __attribute__((warning)); no-op otherwise). */
#if defined(HEDLEY_UNAVAILABLE)
# undef HEDLEY_UNAVAILABLE
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(warning) || \
HEDLEY_GCC_VERSION_CHECK(4,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since)))
#else
# define HEDLEY_UNAVAILABLE(available_since)
#endif
/* HEDLEY_WARN_UNUSED_RESULT / HEDLEY_WARN_UNUSED_RESULT_MSG(msg): warn when
 * a function's return value is discarded (attribute, [[nodiscard]], or
 * SAL's _Check_return_, in order of preference). */
#if defined(HEDLEY_WARN_UNUSED_RESULT)
# undef HEDLEY_WARN_UNUSED_RESULT
#endif
#if defined(HEDLEY_WARN_UNUSED_RESULT_MSG)
# undef HEDLEY_WARN_UNUSED_RESULT_MSG
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
(HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
# define HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__))
#elif (HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L)
# define HEDLEY_WARN_UNUSED_RESULT HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
# define HEDLEY_WARN_UNUSED_RESULT_MSG(msg) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]])
#elif HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard)
# define HEDLEY_WARN_UNUSED_RESULT HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
# define HEDLEY_WARN_UNUSED_RESULT_MSG(msg) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
#elif defined(_Check_return_) /* SAL */
# define HEDLEY_WARN_UNUSED_RESULT _Check_return_
# define HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_
#else
# define HEDLEY_WARN_UNUSED_RESULT
# define HEDLEY_WARN_UNUSED_RESULT_MSG(msg)
#endif
/* HEDLEY_SENTINEL(position): mark a variadic function as requiring a NULL
 * sentinel at the given position from the end of the argument list. */
#if defined(HEDLEY_SENTINEL)
# undef HEDLEY_SENTINEL
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(sentinel) || \
HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position)))
#else
# define HEDLEY_SENTINEL(position)
#endif
/* HEDLEY_NO_RETURN: declare that a function never returns.  Prefers the
 * compiler keyword, then C11 _Noreturn, then C++11 [[noreturn]], then the
 * various vendor attributes/pragmas; no-op as a last resort. */
#if defined(HEDLEY_NO_RETURN)
# undef HEDLEY_NO_RETURN
#endif
#if HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_NO_RETURN __noreturn
#elif \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_NO_RETURN __attribute__((__noreturn__))
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
# define HEDLEY_NO_RETURN _Noreturn
#elif defined(__cplusplus) && (__cplusplus >= 201103L)
# define HEDLEY_NO_RETURN HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]])
#elif \
HEDLEY_HAS_ATTRIBUTE(noreturn) || \
HEDLEY_GCC_VERSION_CHECK(3,2,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_IAR_VERSION_CHECK(8,10,0)
# define HEDLEY_NO_RETURN __attribute__((__noreturn__))
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
# define HEDLEY_NO_RETURN _Pragma("does_not_return")
#elif \
HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_NO_RETURN __declspec(noreturn)
#elif HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
# define HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;")
#elif HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
# define HEDLEY_NO_RETURN __attribute((noreturn))
#elif HEDLEY_PELLES_VERSION_CHECK(9,0,0)
# define HEDLEY_NO_RETURN __declspec(noreturn)
#else
# define HEDLEY_NO_RETURN
#endif
/* HEDLEY_NO_ESCAPE: mark a pointer parameter as not escaping the call
 * (clang's noescape attribute); no-op elsewhere. */
#if defined(HEDLEY_NO_ESCAPE)
# undef HEDLEY_NO_ESCAPE
#endif
#if HEDLEY_HAS_ATTRIBUTE(noescape)
# define HEDLEY_NO_ESCAPE __attribute__((__noescape__))
#else
# define HEDLEY_NO_ESCAPE
#endif
/* HEDLEY_UNREACHABLE / HEDLEY_UNREACHABLE_RETURN / HEDLEY_ASSUME: optimizer
 * hints.  The three are defined in terms of each other so that whichever
 * primitive the compiler provides (__builtin_unreachable, __assume,
 * _nassert) backs all of them. */
#if defined(HEDLEY_UNREACHABLE)
# undef HEDLEY_UNREACHABLE
#endif
#if defined(HEDLEY_UNREACHABLE_RETURN)
# undef HEDLEY_UNREACHABLE_RETURN
#endif
#if defined(HEDLEY_ASSUME)
# undef HEDLEY_ASSUME
#endif
#if \
HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_ASSUME(expr) __assume(expr)
#elif HEDLEY_HAS_BUILTIN(__builtin_assume)
# define HEDLEY_ASSUME(expr) __builtin_assume(expr)
#elif \
HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
# if defined(__cplusplus)
# define HEDLEY_ASSUME(expr) std::_nassert(expr)
# else
# define HEDLEY_ASSUME(expr) _nassert(expr)
# endif
#endif
#if \
(HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(HEDLEY_ARM_VERSION))) || \
HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
HEDLEY_PGI_VERSION_CHECK(18,10,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,5) || \
HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_UNREACHABLE() __builtin_unreachable()
#elif defined(HEDLEY_ASSUME)
# define HEDLEY_UNREACHABLE() HEDLEY_ASSUME(0)
#endif
/* Fallbacks: synthesize whichever of ASSUME/UNREACHABLE is still missing
 * from the one that was defined above. */
#if !defined(HEDLEY_ASSUME)
# if defined(HEDLEY_UNREACHABLE)
# define HEDLEY_ASSUME(expr) HEDLEY_STATIC_CAST(void, ((expr) ? 1 : (HEDLEY_UNREACHABLE(), 1)))
# else
# define HEDLEY_ASSUME(expr) HEDLEY_STATIC_CAST(void, expr)
# endif
#endif
#if defined(HEDLEY_UNREACHABLE)
# if \
HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
# define HEDLEY_UNREACHABLE_RETURN(value) return (HEDLEY_STATIC_CAST(void, HEDLEY_ASSUME(0)), (value))
# else
# define HEDLEY_UNREACHABLE_RETURN(value) HEDLEY_UNREACHABLE()
# endif
#else
# define HEDLEY_UNREACHABLE_RETURN(value) return (value)
#endif
#if !defined(HEDLEY_UNREACHABLE)
# define HEDLEY_UNREACHABLE() HEDLEY_ASSUME(0)
#endif
/* The PUSH/POP pair below silences pedantic and variadic-macro warnings
 * while HEDLEY_NON_NULL (a variadic macro) is being defined, then restores
 * the previous diagnostic state. */
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wpedantic")
# pragma clang diagnostic ignored "-Wpedantic"
#endif
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus)
# pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#endif
#if HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0)
# if defined(__clang__)
# pragma clang diagnostic ignored "-Wvariadic-macros"
# elif defined(HEDLEY_GCC_VERSION)
# pragma GCC diagnostic ignored "-Wvariadic-macros"
# endif
#endif
/* HEDLEY_NON_NULL(...): declare that the listed 1-based argument positions
 * must not receive NULL (GCC nonnull attribute); no-op elsewhere. */
#if defined(HEDLEY_NON_NULL)
# undef HEDLEY_NON_NULL
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(nonnull) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0)
# define HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__)))
#else
# define HEDLEY_NON_NULL(...)
#endif
HEDLEY_DIAGNOSTIC_POP
/* HEDLEY_PRINTF_FORMAT(string_idx, first_to_check): printf-style format
 * checking.  On MinGW the ms_printf/gnu_printf archetype is chosen by
 * whether __USE_MINGW_ANSI_STDIO is in effect. */
#if defined(HEDLEY_PRINTF_FORMAT)
# undef HEDLEY_PRINTF_FORMAT
#endif
#if defined(__MINGW32__) && HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO)
# define HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check)))
#elif defined(__MINGW32__) && HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO)
# define HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check)))
#elif \
HEDLEY_HAS_ATTRIBUTE(format) || \
HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check)))
#elif HEDLEY_PELLES_VERSION_CHECK(6,0,0)
# define HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check))
#else
# define HEDLEY_PRINTF_FORMAT(string_idx,first_to_check)
#endif
/* HEDLEY_CONSTEXPR: expands to constexpr in C++11 and later, nothing
 * otherwise (wrapped to avoid -Wc++98-compat noise). */
#if defined(HEDLEY_CONSTEXPR)
# undef HEDLEY_CONSTEXPR
#endif
#if defined(__cplusplus)
# if __cplusplus >= 201103L
# define HEDLEY_CONSTEXPR HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr)
# endif
#endif
#if !defined(HEDLEY_CONSTEXPR)
# define HEDLEY_CONSTEXPR
#endif
/* HEDLEY_PREDICT / HEDLEY_PREDICT_TRUE / HEDLEY_PREDICT_FALSE /
 * HEDLEY_LIKELY / HEDLEY_UNLIKELY / HEDLEY_UNPREDICTABLE: branch-prediction
 * hints.  Prefer __builtin_expect_with_probability, fall back to
 * __builtin_expect, else evaluate the expression with no hint. */
#if defined(HEDLEY_PREDICT)
# undef HEDLEY_PREDICT
#endif
#if defined(HEDLEY_LIKELY)
# undef HEDLEY_LIKELY
#endif
#if defined(HEDLEY_UNLIKELY)
# undef HEDLEY_UNLIKELY
#endif
#if defined(HEDLEY_UNPREDICTABLE)
# undef HEDLEY_UNPREDICTABLE
#endif
#if HEDLEY_HAS_BUILTIN(__builtin_unpredictable)
# define HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr))
#endif
#if \
(HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(HEDLEY_PGI_VERSION) && !defined(HEDLEY_INTEL_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(9,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability))
# define HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability))
# define HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability))
# define HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 )
# define HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 )
#elif \
(HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(HEDLEY_INTEL_CL_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
(HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_PREDICT(expr, expected, probability) \
(((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (HEDLEY_STATIC_CAST(void, expected), (expr)))
# define HEDLEY_PREDICT_TRUE(expr, probability) \
(__extension__ ({ \
double hedley_probability_ = (probability); \
((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \
}))
# define HEDLEY_PREDICT_FALSE(expr, probability) \
(__extension__ ({ \
double hedley_probability_ = (probability); \
((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \
}))
# define HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1)
# define HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
#else
# define HEDLEY_PREDICT(expr, expected, probability) (HEDLEY_STATIC_CAST(void, expected), (expr))
# define HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr))
# define HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr))
# define HEDLEY_LIKELY(expr) (!!(expr))
# define HEDLEY_UNLIKELY(expr) (!!(expr))
#endif
/* Fallback: "unpredictable" as a 50% prediction when __builtin_unpredictable
 * is unavailable. */
#if !defined(HEDLEY_UNPREDICTABLE)
# define HEDLEY_UNPREDICTABLE(expr) HEDLEY_PREDICT(expr, 1, 0.5)
#endif
/* HEDLEY_MALLOC: declare that a function returns newly-allocated,
 * non-aliased memory (malloc attribute / __declspec(restrict)). */
#if defined(HEDLEY_MALLOC)
# undef HEDLEY_MALLOC
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(malloc) || \
HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_MALLOC __attribute__((__malloc__))
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
# define HEDLEY_MALLOC _Pragma("returns_new_memory")
#elif \
HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_MALLOC __declspec(restrict)
#else
# define HEDLEY_MALLOC
#endif
/* HEDLEY_PURE: declare a function whose result depends only on its
 * arguments and global state, with no side effects (GCC pure attribute). */
#if defined(HEDLEY_PURE)
# undef HEDLEY_PURE
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(pure) || \
HEDLEY_GCC_VERSION_CHECK(2,96,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_PURE __attribute__((__pure__))
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
# define HEDLEY_PURE _Pragma("does_not_write_global_data")
#elif defined(__cplusplus) && \
( \
HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \
)
# define HEDLEY_PURE _Pragma("FUNC_IS_PURE;")
#else
# define HEDLEY_PURE
#endif
/* HEDLEY_CONST: stronger than PURE -- the result depends only on the
 * arguments (GCC const attribute).  Falls back to HEDLEY_PURE, which is a
 * weaker but still valid promise, when no const-equivalent exists. */
#if defined(HEDLEY_CONST)
# undef HEDLEY_CONST
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(const) || \
HEDLEY_GCC_VERSION_CHECK(2,5,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_CONST __attribute__((__const__))
#elif \
HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
# define HEDLEY_CONST _Pragma("no_side_effect")
#else
# define HEDLEY_CONST HEDLEY_PURE
#endif
/* HEDLEY_RESTRICT: C99 restrict, the __restrict extension, Sun's
 * _Restrict, or nothing, depending on language mode and compiler. */
#if defined(HEDLEY_RESTRICT)
# undef HEDLEY_RESTRICT
#endif
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus)
# define HEDLEY_RESTRICT restrict
#elif \
HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
(HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \
HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
defined(__clang__) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_RESTRICT __restrict
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus)
# define HEDLEY_RESTRICT _Restrict
#else
# define HEDLEY_RESTRICT
#endif
/* HEDLEY_INLINE: the inline keyword appropriate to the language mode
 * (C99/C++ inline, GNU __inline__, or MSVC-style __inline). */
#if defined(HEDLEY_INLINE)
# undef HEDLEY_INLINE
#endif
#if \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
(defined(__cplusplus) && (__cplusplus >= 199711L))
# define HEDLEY_INLINE inline
#elif \
defined(HEDLEY_GCC_VERSION) || \
HEDLEY_ARM_VERSION_CHECK(6,2,0)
# define HEDLEY_INLINE __inline__
#elif \
HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_INLINE __inline
#else
# define HEDLEY_INLINE
#endif
/* HEDLEY_ALWAYS_INLINE: request that the compiler always inline the
 * function (attribute + HEDLEY_INLINE, __forceinline, or a TI/IAR pragma);
 * degrades to plain HEDLEY_INLINE when no stronger form exists. */
#if defined(HEDLEY_ALWAYS_INLINE)
# undef HEDLEY_ALWAYS_INLINE
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(always_inline) || \
HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \
HEDLEY_IAR_VERSION_CHECK(8,10,0)
# define HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) HEDLEY_INLINE
#elif \
HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_ALWAYS_INLINE __forceinline
#elif defined(__cplusplus) && \
( \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \
)
# define HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;")
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_ALWAYS_INLINE _Pragma("inline=forced")
#else
# define HEDLEY_ALWAYS_INLINE HEDLEY_INLINE
#endif
/* HEDLEY_NEVER_INLINE: forbid inlining (definition continues below). */
#if defined(HEDLEY_NEVER_INLINE)
# undef HEDLEY_NEVER_INLINE
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(noinline) || \
HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \
HEDLEY_IAR_VERSION_CHECK(8,10,0)
# define HEDLEY_NEVER_INLINE __attribute__((__noinline__))
#elif \
HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_NEVER_INLINE __declspec(noinline)
#elif HEDLEY_PGI_VERSION_CHECK(10,2,0)
# define HEDLEY_NEVER_INLINE _Pragma("noinline")
#elif HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
# define HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;")
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_NEVER_INLINE _Pragma("inline=never")
#elif HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
# define HEDLEY_NEVER_INLINE __attribute((noinline))
#elif HEDLEY_PELLES_VERSION_CHECK(9,0,0)
# define HEDLEY_NEVER_INLINE __declspec(noinline)
#else
# define HEDLEY_NEVER_INLINE
#endif
#if defined(HEDLEY_PRIVATE)
# undef HEDLEY_PRIVATE
#endif
#if defined(HEDLEY_PUBLIC)
# undef HEDLEY_PUBLIC
#endif
#if defined(HEDLEY_IMPORT)
# undef HEDLEY_IMPORT
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
# define HEDLEY_PRIVATE
# define HEDLEY_PUBLIC __declspec(dllexport)
# define HEDLEY_IMPORT __declspec(dllimport)
#else
# if \
HEDLEY_HAS_ATTRIBUTE(visibility) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
( \
defined(__TI_EABI__) && \
( \
(HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \
) \
) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_PRIVATE __attribute__((__visibility__("hidden")))
# define HEDLEY_PUBLIC __attribute__((__visibility__("default")))
# else
# define HEDLEY_PRIVATE
# define HEDLEY_PUBLIC
# endif
# define HEDLEY_IMPORT extern
#endif
#if defined(HEDLEY_NO_THROW)
# undef HEDLEY_NO_THROW
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(nothrow) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_NO_THROW __attribute__((__nothrow__))
#elif \
HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0)
# define HEDLEY_NO_THROW __declspec(nothrow)
#else
# define HEDLEY_NO_THROW
#endif
#if defined(HEDLEY_FALL_THROUGH)
# undef HEDLEY_FALL_THROUGH
#endif
#if defined(HEDLEY_INTEL_VERSION)
# define HEDLEY_FALL_THROUGH
#elif \
HEDLEY_HAS_ATTRIBUTE(fallthrough) || \
HEDLEY_GCC_VERSION_CHECK(7,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
#elif HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough)
# define HEDLEY_FALL_THROUGH HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]])
#elif HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough)
# define HEDLEY_FALL_THROUGH HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]])
#elif defined(__fallthrough) /* SAL */
# define HEDLEY_FALL_THROUGH __fallthrough
#else
# define HEDLEY_FALL_THROUGH
#endif
#if defined(HEDLEY_RETURNS_NON_NULL)
# undef HEDLEY_RETURNS_NON_NULL
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \
HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
#elif defined(_Ret_notnull_) /* SAL */
# define HEDLEY_RETURNS_NON_NULL _Ret_notnull_
#else
# define HEDLEY_RETURNS_NON_NULL
#endif
#if defined(HEDLEY_ARRAY_PARAM)
# undef HEDLEY_ARRAY_PARAM
#endif
#if \
defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
!defined(__STDC_NO_VLA__) && \
!defined(__cplusplus) && \
!defined(HEDLEY_PGI_VERSION) && \
!defined(HEDLEY_TINYC_VERSION)
# define HEDLEY_ARRAY_PARAM(name) (name)
#else
# define HEDLEY_ARRAY_PARAM(name)
#endif
#if defined(HEDLEY_IS_CONSTANT)
# undef HEDLEY_IS_CONSTANT
#endif
#if defined(HEDLEY_REQUIRE_CONSTEXPR)
# undef HEDLEY_REQUIRE_CONSTEXPR
#endif
/* HEDLEY_IS_CONSTEXPR_ is for
HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
#if defined(HEDLEY_IS_CONSTEXPR_)
# undef HEDLEY_IS_CONSTEXPR_
#endif
#if \
HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
(HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
#endif
#if !defined(__cplusplus)
# if \
HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,24)
# if defined(__INTPTR_TYPE__)
# define HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*)
# else
# include <stdint.h>
# define HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*)
# endif
# elif \
( \
defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
!defined(HEDLEY_SUNPRO_VERSION) && \
!defined(HEDLEY_PGI_VERSION) && \
!defined(HEDLEY_IAR_VERSION)) || \
(HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(HEDLEY_IAR_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
HEDLEY_ARM_VERSION_CHECK(5,3,0)
#    if defined(__INTPTR_TYPE__)
#      define HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
#    else
#      include <stdint.h>
       /* Fix: the original expanded to `(intptr_t) * 0`, dropping the `(expr)`
        * operand entirely, which is a syntax error once the macro is used.
        * Mirror the __INTPTR_TYPE__ branch above: (expr) * 0 is an integer
        * constant expression iff expr is, making the conditional's composite
        * pointer type int* (constant null) vs void* (non-constant). */
#      define HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
#    endif
# elif \
defined(HEDLEY_GCC_VERSION) || \
defined(HEDLEY_INTEL_VERSION) || \
defined(HEDLEY_TINYC_VERSION) || \
defined(HEDLEY_TI_ARMCL_VERSION) || \
HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \
defined(HEDLEY_TI_CL2000_VERSION) || \
defined(HEDLEY_TI_CL6X_VERSION) || \
defined(HEDLEY_TI_CL7X_VERSION) || \
defined(HEDLEY_TI_CLPRU_VERSION) || \
defined(__clang__)
# define HEDLEY_IS_CONSTEXPR_(expr) ( \
sizeof(void) != \
sizeof(*( \
1 ? \
((void*) ((expr) * 0L) ) : \
((struct { char v[sizeof(void) * 2]; } *) 1) \
) \
) \
)
# endif
#endif
#if defined(HEDLEY_IS_CONSTEXPR_)
# if !defined(HEDLEY_IS_CONSTANT)
# define HEDLEY_IS_CONSTANT(expr) HEDLEY_IS_CONSTEXPR_(expr)
# endif
# define HEDLEY_REQUIRE_CONSTEXPR(expr) (HEDLEY_IS_CONSTEXPR_(expr) ? (expr) : (-1))
#else
# if !defined(HEDLEY_IS_CONSTANT)
# define HEDLEY_IS_CONSTANT(expr) (0)
# endif
# define HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
#endif
#if defined(HEDLEY_BEGIN_C_DECLS)
# undef HEDLEY_BEGIN_C_DECLS
#endif
#if defined(HEDLEY_END_C_DECLS)
# undef HEDLEY_END_C_DECLS
#endif
#if defined(HEDLEY_C_DECL)
# undef HEDLEY_C_DECL
#endif
#if defined(__cplusplus)
# define HEDLEY_BEGIN_C_DECLS extern "C" {
# define HEDLEY_END_C_DECLS }
# define HEDLEY_C_DECL extern "C"
#else
# define HEDLEY_BEGIN_C_DECLS
# define HEDLEY_END_C_DECLS
# define HEDLEY_C_DECL
#endif
#if defined(HEDLEY_STATIC_ASSERT)
# undef HEDLEY_STATIC_ASSERT
#endif
#if \
!defined(__cplusplus) && ( \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
(HEDLEY_HAS_FEATURE(c_static_assert) && !defined(HEDLEY_INTEL_CL_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
defined(_Static_assert) \
)
# define HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
#elif \
(defined(__cplusplus) && (__cplusplus >= 201103L)) || \
HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_STATIC_ASSERT(expr, message) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message))
#else
# define HEDLEY_STATIC_ASSERT(expr, message)
#endif
#if defined(HEDLEY_NULL)
# undef HEDLEY_NULL
#endif
#if defined(__cplusplus)
# if __cplusplus >= 201103L
# define HEDLEY_NULL HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr)
# elif defined(NULL)
# define HEDLEY_NULL NULL
# else
# define HEDLEY_NULL HEDLEY_STATIC_CAST(void*, 0)
# endif
#elif defined(NULL)
# define HEDLEY_NULL NULL
#else
# define HEDLEY_NULL ((void*) 0)
#endif
#if defined(HEDLEY_MESSAGE)
# undef HEDLEY_MESSAGE
#endif
#if HEDLEY_HAS_WARNING("-Wunknown-pragmas")
# define HEDLEY_MESSAGE(msg) \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
HEDLEY_PRAGMA(message msg) \
HEDLEY_DIAGNOSTIC_POP
#elif \
HEDLEY_GCC_VERSION_CHECK(4,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_MESSAGE(msg) HEDLEY_PRAGMA(message msg)
#elif HEDLEY_CRAY_VERSION_CHECK(5,0,0)
# define HEDLEY_MESSAGE(msg) HEDLEY_PRAGMA(_CRI message msg)
#elif HEDLEY_IAR_VERSION_CHECK(8,0,0)
# define HEDLEY_MESSAGE(msg) HEDLEY_PRAGMA(message(msg))
#elif HEDLEY_PELLES_VERSION_CHECK(2,0,0)
# define HEDLEY_MESSAGE(msg) HEDLEY_PRAGMA(message(msg))
#else
# define HEDLEY_MESSAGE(msg)
#endif
#if defined(HEDLEY_WARNING)
# undef HEDLEY_WARNING
#endif
#if HEDLEY_HAS_WARNING("-Wunknown-pragmas")
# define HEDLEY_WARNING(msg) \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
HEDLEY_PRAGMA(clang warning msg) \
HEDLEY_DIAGNOSTIC_POP
#elif \
HEDLEY_GCC_VERSION_CHECK(4,8,0) || \
HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define HEDLEY_WARNING(msg) HEDLEY_PRAGMA(GCC warning msg)
#elif \
HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_WARNING(msg) HEDLEY_PRAGMA(message(msg))
#else
# define HEDLEY_WARNING(msg) HEDLEY_MESSAGE(msg)
#endif
#if defined(HEDLEY_REQUIRE)
# undef HEDLEY_REQUIRE
#endif
#if defined(HEDLEY_REQUIRE_MSG)
# undef HEDLEY_REQUIRE_MSG
#endif
#if HEDLEY_HAS_ATTRIBUTE(diagnose_if)
# if HEDLEY_HAS_WARNING("-Wgcc-compat")
# define HEDLEY_REQUIRE(expr) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
__attribute__((diagnose_if(!(expr), #expr, "error"))) \
HEDLEY_DIAGNOSTIC_POP
# define HEDLEY_REQUIRE_MSG(expr,msg) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
__attribute__((diagnose_if(!(expr), msg, "error"))) \
HEDLEY_DIAGNOSTIC_POP
# else
# define HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error")))
# define HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error")))
# endif
#else
# define HEDLEY_REQUIRE(expr)
# define HEDLEY_REQUIRE_MSG(expr,msg)
#endif
#if defined(HEDLEY_FLAGS)
# undef HEDLEY_FLAGS
#endif
#if HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion"))
# define HEDLEY_FLAGS __attribute__((__flag_enum__))
#else
# define HEDLEY_FLAGS
#endif
#if defined(HEDLEY_FLAGS_CAST)
# undef HEDLEY_FLAGS_CAST
#endif
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
# define HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("warning(disable:188)") \
((T) (expr)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
# define HEDLEY_FLAGS_CAST(T, expr) HEDLEY_STATIC_CAST(T, expr)
#endif
#if defined(HEDLEY_EMPTY_BASES)
# undef HEDLEY_EMPTY_BASES
#endif
#if \
(HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \
HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
# define HEDLEY_EMPTY_BASES __declspec(empty_bases)
#else
# define HEDLEY_EMPTY_BASES
#endif
/* Remaining macros are deprecated. */
#if defined(HEDLEY_GCC_NOT_CLANG_VERSION_CHECK)
# undef HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
#endif
#if defined(__clang__)
# define HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0)
#else
# define HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(HEDLEY_CLANG_HAS_ATTRIBUTE)
# undef HEDLEY_CLANG_HAS_ATTRIBUTE
#endif
#define HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) HEDLEY_HAS_ATTRIBUTE(attribute)
#if defined(HEDLEY_CLANG_HAS_CPP_ATTRIBUTE)
# undef HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
#endif
#define HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) HEDLEY_HAS_CPP_ATTRIBUTE(attribute)
#if defined(HEDLEY_CLANG_HAS_BUILTIN)
# undef HEDLEY_CLANG_HAS_BUILTIN
#endif
#define HEDLEY_CLANG_HAS_BUILTIN(builtin) HEDLEY_HAS_BUILTIN(builtin)
#if defined(HEDLEY_CLANG_HAS_FEATURE)
# undef HEDLEY_CLANG_HAS_FEATURE
#endif
#define HEDLEY_CLANG_HAS_FEATURE(feature) HEDLEY_HAS_FEATURE(feature)
#if defined(HEDLEY_CLANG_HAS_EXTENSION)
# undef HEDLEY_CLANG_HAS_EXTENSION
#endif
#define HEDLEY_CLANG_HAS_EXTENSION(extension) HEDLEY_HAS_EXTENSION(extension)
/* Fix: the guard previously tested/undef'd HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE
 * (doubled "DECLSPEC"), so a pre-existing definition of the macro actually
 * defined below was never removed. Undef the name we are about to define,
 * matching every other deprecated-macro guard in this header. */
#if defined(HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE)
#  undef HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE
#endif
#define HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute)
#if defined(HEDLEY_CLANG_HAS_WARNING)
# undef HEDLEY_CLANG_HAS_WARNING
#endif
#define HEDLEY_CLANG_HAS_WARNING(warning) HEDLEY_HAS_WARNING(warning)
#endif /* !defined(HEDLEY_VERSION) || (HEDLEY_VERSION < X) */
/* :: End hedley.h :: */
#define SIMDE_VERSION_MAJOR 0
#define SIMDE_VERSION_MINOR 7
#define SIMDE_VERSION_MICRO 3
#define SIMDE_VERSION HEDLEY_VERSION_ENCODE(SIMDE_VERSION_MAJOR, SIMDE_VERSION_MINOR, SIMDE_VERSION_MICRO)
// Also update meson.build in the root directory of the repository
#include <stddef.h>
#include <stdint.h>
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-detect-clang.h :: */
/* Detect Clang Version
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to
* the public domain worldwide. This software is distributed without
* any warranty.
*
* For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
* SPDX-License-Identifier: CC0-1.0
*/
/* This file was originally part of SIMDe
* (<https://github.com/simd-everywhere/simde>). You're free to do with it as
* you please, but I do have a few small requests:
*
* * If you make improvements, please submit them back to SIMDe
* (at <https://github.com/simd-everywhere/simde/issues>) so others can
* benefit from them.
* * Please keep a link to SIMDe intact so people know where to submit
* improvements.
* * If you expose it publicly, please change the SIMDE_ prefix to
* something specific to your project.
*
 * The version numbers clang exposes (in the __clang_major__,
* __clang_minor__, and __clang_patchlevel__ macros) are unreliable.
* Vendors such as Apple will define these values to their version
* numbers; for example, "Apple Clang 4.0" is really clang 3.1, but
* __clang_major__ and __clang_minor__ are defined to 4 and 0
* respectively, instead of 3 and 1.
*
* The solution is *usually* to use clang's feature detection macros
* (<https://clang.llvm.org/docs/LanguageExtensions.html#feature-checking-macros>)
* to determine if the feature you're interested in is available. This
* generally works well, and it should probably be the first thing you
* try. Unfortunately, it's not possible to check for everything. In
* particular, compiler bugs.
*
* This file just uses the feature checking macros to detect features
* added in specific versions of clang to identify which version of
* clang the compiler is based on.
*
* Right now it only goes back to 3.6, but I'm happy to accept patches
* to go back further. And, of course, newer versions are welcome if
* they're not already present, and if you find a way to detect a point
* release that would be great, too!
*/
#if !defined(SIMDE_DETECT_CLANG_H)
#define SIMDE_DETECT_CLANG_H 1
/* Attempt to detect the upstream clang version number. I usually only
* worry about major version numbers (at least for 4.0+), but if you
* need more resolution I'm happy to accept patches that are able to
* detect minor versions as well. That said, you'll probably have a
* hard time with detection since AFAIK most minor releases don't add
* anything we can detect. */
#if defined(__clang__) && !defined(SIMDE_DETECT_CLANG_VERSION)
# if __has_warning("-Wformat-insufficient-args")
# define SIMDE_DETECT_CLANG_VERSION 120000
# elif __has_warning("-Wimplicit-const-int-float-conversion")
# define SIMDE_DETECT_CLANG_VERSION 110000
# elif __has_warning("-Wmisleading-indentation")
# define SIMDE_DETECT_CLANG_VERSION 100000
# elif defined(__FILE_NAME__)
# define SIMDE_DETECT_CLANG_VERSION 90000
# elif __has_warning("-Wextra-semi-stmt") || __has_builtin(__builtin_rotateleft32)
# define SIMDE_DETECT_CLANG_VERSION 80000
# elif __has_warning("-Wc++98-compat-extra-semi")
# define SIMDE_DETECT_CLANG_VERSION 70000
# elif __has_warning("-Wpragma-pack")
# define SIMDE_DETECT_CLANG_VERSION 60000
# elif __has_warning("-Wbitfield-enum-conversion")
# define SIMDE_DETECT_CLANG_VERSION 50000
# elif __has_attribute(diagnose_if)
# define SIMDE_DETECT_CLANG_VERSION 40000
# elif __has_warning("-Wcomma")
# define SIMDE_DETECT_CLANG_VERSION 39000
# elif __has_warning("-Wdouble-promotion")
# define SIMDE_DETECT_CLANG_VERSION 38000
# elif __has_warning("-Wshift-negative-value")
# define SIMDE_DETECT_CLANG_VERSION 37000
# elif __has_warning("-Wambiguous-ellipsis")
# define SIMDE_DETECT_CLANG_VERSION 36000
# else
# define SIMDE_DETECT_CLANG_VERSION 1
# endif
#endif /* defined(__clang__) && !defined(SIMDE_DETECT_CLANG_VERSION) */
/* The SIMDE_DETECT_CLANG_VERSION_CHECK macro is pretty
* straightforward; it returns true if the compiler is a derivative
* of clang >= the specified version.
*
* Since this file is often (primarily?) useful for working around bugs
* it is also helpful to have a macro which returns true if only if the
* compiler is a version of clang *older* than the specified version to
* make it a bit easier to ifdef regions to add code for older versions,
* such as pragmas to disable a specific warning. */
#if defined(SIMDE_DETECT_CLANG_VERSION)
   /* Macro hygiene: parenthesize each parameter so expression arguments
    * (e.g. `MAJ+1`) expand correctly; literal-argument call sites are
    * unaffected. */
#  define SIMDE_DETECT_CLANG_VERSION_CHECK(major, minor, revision) (SIMDE_DETECT_CLANG_VERSION >= (((major) * 10000) + ((minor) * 1000) + (revision)))
#  define SIMDE_DETECT_CLANG_VERSION_NOT(major, minor, revision) (SIMDE_DETECT_CLANG_VERSION < (((major) * 10000) + ((minor) * 1000) + (revision)))
#else
#  define SIMDE_DETECT_CLANG_VERSION_CHECK(major, minor, revision) (0)
#  define SIMDE_DETECT_CLANG_VERSION_NOT(major, minor, revision) (0)
#endif
#endif /* !defined(SIMDE_DETECT_CLANG_H) */
/* :: End simde-detect-clang.h :: */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-arch.h :: */
/* Architecture detection
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* <https://creativecommons.org/publicdomain/zero/1.0/>
*
* SPDX-License-Identifier: CC0-1.0
*
* Different compilers define different preprocessor macros for the
* same architecture. This is an attempt to provide a single
* interface which is usable on any compiler.
*
* In general, a macro named SIMDE_ARCH_* is defined for each
* architecture the CPU supports. When there are multiple possible
* versions, we try to define the macro to the target version. For
* example, if you want to check for i586+, you could do something
* like:
*
* #if defined(SIMDE_ARCH_X86) && (SIMDE_ARCH_X86 >= 5)
* ...
* #endif
*
* You could also just check that SIMDE_ARCH_X86 >= 5 without checking
* if it's defined first, but some compilers may emit a warning about
* an undefined macro being used (e.g., GCC with -Wundef).
*
* This was originally created for SIMDe
* <https://github.com/simd-everywhere/simde> (hence the prefix), but this
* header has no dependencies and may be used anywhere. It is
* originally based on information from
* <https://sourceforge.net/p/predef/wiki/Architectures/>, though it
* has been enhanced with additional information.
*
* If you improve this file, or find a bug, please file the issue at
* <https://github.com/simd-everywhere/simde/issues>. If you copy this into
* your project, even if you change the prefix, please keep the links
* to SIMDe intact so others know where to report issues, submit
* enhancements, and find the latest version. */
#if !defined(SIMDE_ARCH_H)
#define SIMDE_ARCH_H
/* Alpha
<https://en.wikipedia.org/wiki/DEC_Alpha> */
#if defined(__alpha__) || defined(__alpha) || defined(_M_ALPHA)
# if defined(__alpha_ev6__)
# define SIMDE_ARCH_ALPHA 6
# elif defined(__alpha_ev5__)
# define SIMDE_ARCH_ALPHA 5
# elif defined(__alpha_ev4__)
# define SIMDE_ARCH_ALPHA 4
# else
# define SIMDE_ARCH_ALPHA 1
# endif
#endif
#if defined(SIMDE_ARCH_ALPHA)
# define SIMDE_ARCH_ALPHA_CHECK(version) ((version) <= SIMDE_ARCH_ALPHA)
#else
# define SIMDE_ARCH_ALPHA_CHECK(version) (0)
#endif
/* Atmel AVR
<https://en.wikipedia.org/wiki/Atmel_AVR> */
#if defined(__AVR_ARCH__)
# define SIMDE_ARCH_AVR __AVR_ARCH__
#endif
/* AMD64 / x86_64
<https://en.wikipedia.org/wiki/X86-64> */
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
# if !defined(_M_ARM64EC)
# define SIMDE_ARCH_AMD64 1000
# endif
#endif
/* ARM
<https://en.wikipedia.org/wiki/ARM_architecture> */
/* Normalize the ARM architecture version to a three-digit value:
 * some compilers report e.g. 7, others 700. */
#if defined(__ARM_ARCH)
#  if __ARM_ARCH > 100
#    define SIMDE_ARCH_ARM (__ARM_ARCH)
#  else
#    define SIMDE_ARCH_ARM (__ARM_ARCH * 100)
#  endif
#elif defined(_M_ARM)
#  if _M_ARM > 100
#    define SIMDE_ARCH_ARM (_M_ARM)
#  else
#    define SIMDE_ARCH_ARM (_M_ARM * 100)
#  endif
#elif defined(_M_ARM64) || defined(_M_ARM64EC)
#  define SIMDE_ARCH_ARM 800
/* Fix: the last alternative previously tested defined(_M_ARM) twice; the
 * second test should be _M_ARMT (MSVC Thumb mode), as in upstream
 * simde-arch.h — the duplicate silently dropped Thumb detection. */
#elif defined(__arm__) || defined(__thumb__) || defined(__TARGET_ARCH_ARM) || defined(_ARM) || defined(_M_ARM) || defined(_M_ARMT)
#  define SIMDE_ARCH_ARM 1
#endif
#if defined(SIMDE_ARCH_ARM)
   /* Macro hygiene: parenthesize parameters so expression arguments expand
    * correctly; existing literal-argument call sites are unaffected. */
#  define SIMDE_ARCH_ARM_CHECK(major, minor) ((((major) * 100) + (minor)) <= SIMDE_ARCH_ARM)
#else
#  define SIMDE_ARCH_ARM_CHECK(major, minor) (0)
#endif
/* AArch64
<https://en.wikipedia.org/wiki/ARM_architecture> */
#if defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
# define SIMDE_ARCH_AARCH64 1000
#endif
#if defined(SIMDE_ARCH_AARCH64)
# define SIMDE_ARCH_AARCH64_CHECK(version) ((version) <= SIMDE_ARCH_AARCH64)
#else
# define SIMDE_ARCH_AARCH64_CHECK(version) (0)
#endif
/* ARM SIMD ISA extensions */
#if defined(__ARM_NEON) || defined(SIMDE_ARCH_AARCH64)
# if defined(SIMDE_ARCH_AARCH64)
# define SIMDE_ARCH_ARM_NEON SIMDE_ARCH_AARCH64
# elif defined(SIMDE_ARCH_ARM)
# define SIMDE_ARCH_ARM_NEON SIMDE_ARCH_ARM
# endif
#endif
#if defined(__ARM_FEATURE_SVE)
# define SIMDE_ARCH_ARM_SVE
#endif
/* Blackfin
<https://en.wikipedia.org/wiki/Blackfin> */
#if defined(__bfin) || defined(__BFIN__) || defined(__bfin__)
# define SIMDE_ARCH_BLACKFIN 1
#endif
/* CRIS
<https://en.wikipedia.org/wiki/ETRAX_CRIS> */
#if defined(__CRIS_arch_version)
# define SIMDE_ARCH_CRIS __CRIS_arch_version
#elif defined(__cris__) || defined(__cris) || defined(__CRIS) || defined(__CRIS__)
# define SIMDE_ARCH_CRIS 1
#endif
/* Convex
<https://en.wikipedia.org/wiki/Convex_Computer> */
#if defined(__convex_c38__)
# define SIMDE_ARCH_CONVEX 38
#elif defined(__convex_c34__)
# define SIMDE_ARCH_CONVEX 34
#elif defined(__convex_c32__)
# define SIMDE_ARCH_CONVEX 32
#elif defined(__convex_c2__)
# define SIMDE_ARCH_CONVEX 2
#elif defined(__convex__)
# define SIMDE_ARCH_CONVEX 1
#endif
#if defined(SIMDE_ARCH_CONVEX)
# define SIMDE_ARCH_CONVEX_CHECK(version) ((version) <= SIMDE_ARCH_CONVEX)
#else
# define SIMDE_ARCH_CONVEX_CHECK(version) (0)
#endif
/* Adapteva Epiphany
<https://en.wikipedia.org/wiki/Adapteva_Epiphany> */
#if defined(__epiphany__)
# define SIMDE_ARCH_EPIPHANY 1
#endif
/* Fujitsu FR-V
<https://en.wikipedia.org/wiki/FR-V_(microprocessor)> */
#if defined(__frv__)
# define SIMDE_ARCH_FRV 1
#endif
/* H8/300
<https://en.wikipedia.org/wiki/H8_Family> */
#if defined(__H8300__)
# define SIMDE_ARCH_H8300
#endif
/* Elbrus (8S, 8SV and successors)
<https://en.wikipedia.org/wiki/Elbrus-8S> */
#if defined(__e2k__)
# define SIMDE_ARCH_E2K
#endif
/* HP/PA / PA-RISC
<https://en.wikipedia.org/wiki/PA-RISC> */
#if defined(__PA8000__) || defined(__HPPA20__) || defined(__RISC2_0__) || defined(_PA_RISC2_0)
# define SIMDE_ARCH_HPPA 20
#elif defined(__PA7100__) || defined(__HPPA11__) || defined(_PA_RISC1_1)
# define SIMDE_ARCH_HPPA 11
#elif defined(_PA_RISC1_0)
# define SIMDE_ARCH_HPPA 10
#elif defined(__hppa__) || defined(__HPPA__) || defined(__hppa)
# define SIMDE_ARCH_HPPA 1
#endif
#if defined(SIMDE_ARCH_HPPA)
# define SIMDE_ARCH_HPPA_CHECK(version) ((version) <= SIMDE_ARCH_HPPA)
#else
# define SIMDE_ARCH_HPPA_CHECK(version) (0)
#endif
/* x86
<https://en.wikipedia.org/wiki/X86> */
#if defined(_M_IX86)
# define SIMDE_ARCH_X86 (_M_IX86 / 100)
#elif defined(__I86__)
# define SIMDE_ARCH_X86 __I86__
#elif defined(i686) || defined(__i686) || defined(__i686__)
# define SIMDE_ARCH_X86 6
#elif defined(i586) || defined(__i586) || defined(__i586__)
# define SIMDE_ARCH_X86 5
#elif defined(i486) || defined(__i486) || defined(__i486__)
# define SIMDE_ARCH_X86 4
#elif defined(i386) || defined(__i386) || defined(__i386__)
# define SIMDE_ARCH_X86 3
#elif defined(_X86_) || defined(__X86__) || defined(__THW_INTEL__)
# define SIMDE_ARCH_X86 3
#endif
#if defined(SIMDE_ARCH_X86)
# define SIMDE_ARCH_X86_CHECK(version) ((version) <= SIMDE_ARCH_X86)
#else
# define SIMDE_ARCH_X86_CHECK(version) (0)
#endif
/* SIMD ISA extensions for x86/x86_64 and Elbrus */
#if defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64) || defined(SIMDE_ARCH_E2K)
# if defined(_M_IX86_FP)
# define SIMDE_ARCH_X86_MMX
# if (_M_IX86_FP >= 1)
# define SIMDE_ARCH_X86_SSE 1
# endif
# if (_M_IX86_FP >= 2)
# define SIMDE_ARCH_X86_SSE2 1
# endif
# elif defined(_M_X64)
# define SIMDE_ARCH_X86_SSE 1
# define SIMDE_ARCH_X86_SSE2 1
# else
# if defined(__MMX__)
# define SIMDE_ARCH_X86_MMX 1
# endif
# if defined(__SSE__)
# define SIMDE_ARCH_X86_SSE 1
# endif
# if defined(__SSE2__)
# define SIMDE_ARCH_X86_SSE2 1
# endif
# endif
# if defined(__SSE3__)
# define SIMDE_ARCH_X86_SSE3 1
# endif
# if defined(__SSSE3__)
# define SIMDE_ARCH_X86_SSSE3 1
# endif
# if defined(__SSE4_1__)
# define SIMDE_ARCH_X86_SSE4_1 1
# endif
# if defined(__SSE4_2__)
# define SIMDE_ARCH_X86_SSE4_2 1
# endif
# if defined(__XOP__)
# define SIMDE_ARCH_X86_XOP 1
# endif
# if defined(__AVX__)
# define SIMDE_ARCH_X86_AVX 1
# if !defined(SIMDE_ARCH_X86_SSE3)
# define SIMDE_ARCH_X86_SSE3 1
# endif
# if !defined(SIMDE_ARCH_X86_SSE4_1)
# define SIMDE_ARCH_X86_SSE4_1 1
# endif
# if !defined(SIMDE_ARCH_X86_SSE4_1)
# define SIMDE_ARCH_X86_SSE4_2 1
# endif
# endif
# if defined(__AVX2__)
# define SIMDE_ARCH_X86_AVX2 1
# endif
# if defined(__FMA__)
# define SIMDE_ARCH_X86_FMA 1
# if !defined(SIMDE_ARCH_X86_AVX)
# define SIMDE_ARCH_X86_AVX 1
# endif
# endif
# if defined(__AVX512VP2INTERSECT__)
# define SIMDE_ARCH_X86_AVX512VP2INTERSECT 1
# endif
# if defined(__AVX512BITALG__)
# define SIMDE_ARCH_X86_AVX512BITALG 1
# endif
# if defined(__AVX512VPOPCNTDQ__)
# define SIMDE_ARCH_X86_AVX512VPOPCNTDQ 1
# endif
# if defined(__AVX512VBMI__)
# define SIMDE_ARCH_X86_AVX512VBMI 1
# endif
# if defined(__AVX512VBMI2__)
# define SIMDE_ARCH_X86_AVX512VBMI2 1
# endif
# if defined(__AVX512VNNI__)
# define SIMDE_ARCH_X86_AVX512VNNI 1
# endif
# if defined(__AVX5124VNNIW__)
# define SIMDE_ARCH_X86_AVX5124VNNIW 1
# endif
# if defined(__AVX512BW__)
# define SIMDE_ARCH_X86_AVX512BW 1
# endif
# if defined(__AVX512BF16__)
# define SIMDE_ARCH_X86_AVX512BF16 1
# endif
# if defined(__AVX512CD__)
# define SIMDE_ARCH_X86_AVX512CD 1
# endif
# if defined(__AVX512DQ__)
# define SIMDE_ARCH_X86_AVX512DQ 1
# endif
# if defined(__AVX512F__)
# define SIMDE_ARCH_X86_AVX512F 1
# endif
# if defined(__AVX512VL__)
# define SIMDE_ARCH_X86_AVX512VL 1
# endif
# if defined(__GFNI__)
# define SIMDE_ARCH_X86_GFNI 1
# endif
# if defined(__PCLMUL__)
# define SIMDE_ARCH_X86_PCLMUL 1
# endif
# if defined(__VPCLMULQDQ__)
# define SIMDE_ARCH_X86_VPCLMULQDQ 1
# endif
# if defined(__F16C__)
# define SIMDE_ARCH_X86_F16C 1
# endif
#endif
/* Itanium
<https://en.wikipedia.org/wiki/Itanium> */
#if defined(__ia64__) || defined(_IA64) || defined(__IA64__) || defined(__ia64) || defined(_M_IA64) || defined(__itanium__)
# define SIMDE_ARCH_IA64 1
#endif
/* Renesas M32R
<https://en.wikipedia.org/wiki/M32R> */
#if defined(__m32r__) || defined(__M32R__)
# define SIMDE_ARCH_M32R
#endif
/* Motorola 68000
<https://en.wikipedia.org/wiki/Motorola_68000> */
#if defined(__mc68060__) || defined(__MC68060__)
# define SIMDE_ARCH_M68K 68060
#elif defined(__mc68040__) || defined(__MC68040__)
# define SIMDE_ARCH_M68K 68040
#elif defined(__mc68030__) || defined(__MC68030__)
# define SIMDE_ARCH_M68K 68030
#elif defined(__mc68020__) || defined(__MC68020__)
# define SIMDE_ARCH_M68K 68020
#elif defined(__mc68010__) || defined(__MC68010__)
# define SIMDE_ARCH_M68K 68010
#elif defined(__mc68000__) || defined(__MC68000__)
# define SIMDE_ARCH_M68K 68000
#endif
#if defined(SIMDE_ARCH_M68K)
# define SIMDE_ARCH_M68K_CHECK(version) ((version) <= SIMDE_ARCH_M68K)
#else
# define SIMDE_ARCH_M68K_CHECK(version) (0)
#endif
/* Xilinx MicroBlaze
<https://en.wikipedia.org/wiki/MicroBlaze> */
#if defined(__MICROBLAZE__) || defined(__microblaze__)
# define SIMDE_ARCH_MICROBLAZE
#endif
/* MIPS
<https://en.wikipedia.org/wiki/MIPS_architecture> */
#if defined(_MIPS_ISA_MIPS64R2)
# define SIMDE_ARCH_MIPS 642
#elif defined(_MIPS_ISA_MIPS64)
# define SIMDE_ARCH_MIPS 640
#elif defined(_MIPS_ISA_MIPS32R2)
# define SIMDE_ARCH_MIPS 322
#elif defined(_MIPS_ISA_MIPS32)
# define SIMDE_ARCH_MIPS 320
#elif defined(_MIPS_ISA_MIPS4)
# define SIMDE_ARCH_MIPS 4
#elif defined(_MIPS_ISA_MIPS3)
# define SIMDE_ARCH_MIPS 3
#elif defined(_MIPS_ISA_MIPS2)
# define SIMDE_ARCH_MIPS 2
#elif defined(_MIPS_ISA_MIPS1)
# define SIMDE_ARCH_MIPS 1
#elif defined(_MIPS_ISA_MIPS) || defined(__mips) || defined(__MIPS__)
# define SIMDE_ARCH_MIPS 1
#endif
#if defined(SIMDE_ARCH_MIPS)
# define SIMDE_ARCH_MIPS_CHECK(version) ((version) <= SIMDE_ARCH_MIPS)
#else
# define SIMDE_ARCH_MIPS_CHECK(version) (0)
#endif
#if defined(__mips_loongson_mmi)
# define SIMDE_ARCH_MIPS_LOONGSON_MMI 1
#endif
#if defined(__mips_msa)
# define SIMDE_ARCH_MIPS_MSA 1
#endif
/* Matsushita MN10300
<https://en.wikipedia.org/wiki/MN103> */
#if defined(__MN10300__) || defined(__mn10300__)
# define SIMDE_ARCH_MN10300 1
#endif
/* POWER
<https://en.wikipedia.org/wiki/IBM_POWER_Instruction_Set_Architecture> */
/* MSVC's _M_PPC already carries a version number and is used directly;
 * otherwise the highest matching _ARCH_PWRx / core-specific macro is
 * mapped to a comparable integer (PWR9 -> 900 ... generic PPC -> 1). */
#if defined(_M_PPC)
# define SIMDE_ARCH_POWER _M_PPC
#elif defined(_ARCH_PWR9)
# define SIMDE_ARCH_POWER 900
#elif defined(_ARCH_PWR8)
# define SIMDE_ARCH_POWER 800
#elif defined(_ARCH_PWR7)
# define SIMDE_ARCH_POWER 700
#elif defined(_ARCH_PWR6)
# define SIMDE_ARCH_POWER 600
#elif defined(_ARCH_PWR5)
# define SIMDE_ARCH_POWER 500
#elif defined(_ARCH_PWR4)
# define SIMDE_ARCH_POWER 400
#elif defined(_ARCH_440) || defined(__ppc440__)
# define SIMDE_ARCH_POWER 440
#elif defined(_ARCH_450) || defined(__ppc450__)
# define SIMDE_ARCH_POWER 450
#elif defined(_ARCH_601) || defined(__ppc601__)
# define SIMDE_ARCH_POWER 601
#elif defined(_ARCH_603) || defined(__ppc603__)
# define SIMDE_ARCH_POWER 603
#elif defined(_ARCH_604) || defined(__ppc604__)
# define SIMDE_ARCH_POWER 604
#elif defined(_ARCH_605) || defined(__ppc605__)
# define SIMDE_ARCH_POWER 605
#elif defined(_ARCH_620) || defined(__ppc620__)
# define SIMDE_ARCH_POWER 620
#elif defined(__powerpc) || defined(__powerpc__) || defined(__POWERPC__) || defined(__ppc__) || defined(__PPC__) || defined(_ARCH_PPC) || defined(__ppc)
# define SIMDE_ARCH_POWER 1
#endif
#if defined(SIMDE_ARCH_POWER)
#define SIMDE_ARCH_POWER_CHECK(version) ((version) <= SIMDE_ARCH_POWER)
#else
#define SIMDE_ARCH_POWER_CHECK(version) (0)
#endif
/* AltiVec availability inherits the detected POWER version number. */
#if defined(__ALTIVEC__)
# define SIMDE_ARCH_POWER_ALTIVEC SIMDE_ARCH_POWER
#define SIMDE_ARCH_POWER_ALTIVEC_CHECK(version) ((version) <= SIMDE_ARCH_POWER)
#else
#define SIMDE_ARCH_POWER_ALTIVEC_CHECK(version) (0)
#endif
/* SPARC
<https://en.wikipedia.org/wiki/SPARC> */
/* SIMDE_ARCH_SPARC holds the SPARC architecture version (v9 down to v1);
 * a bare __sparc__/__sparc with no version macro is treated as v1. */
#if defined(__sparc_v9__) || defined(__sparcv9)
# define SIMDE_ARCH_SPARC 9
#elif defined(__sparc_v8__) || defined(__sparcv8)
# define SIMDE_ARCH_SPARC 8
#elif defined(__sparc_v7__) || defined(__sparcv7)
# define SIMDE_ARCH_SPARC 7
#elif defined(__sparc_v6__) || defined(__sparcv6)
# define SIMDE_ARCH_SPARC 6
#elif defined(__sparc_v5__) || defined(__sparcv5)
# define SIMDE_ARCH_SPARC 5
#elif defined(__sparc_v4__) || defined(__sparcv4)
# define SIMDE_ARCH_SPARC 4
#elif defined(__sparc_v3__) || defined(__sparcv3)
# define SIMDE_ARCH_SPARC 3
#elif defined(__sparc_v2__) || defined(__sparcv2)
# define SIMDE_ARCH_SPARC 2
#elif defined(__sparc_v1__) || defined(__sparcv1)
# define SIMDE_ARCH_SPARC 1
#elif defined(__sparc__) || defined(__sparc)
# define SIMDE_ARCH_SPARC 1
#endif
#if defined(SIMDE_ARCH_SPARC)
#define SIMDE_ARCH_SPARC_CHECK(version) ((version) <= SIMDE_ARCH_SPARC)
#else
#define SIMDE_ARCH_SPARC_CHECK(version) (0)
#endif
/* SuperH
<https://en.wikipedia.org/wiki/SuperH> */
/* SIMDE_ARCH_SUPERH holds the SuperH generation (SH-5 down to SH-1);
 * a bare __sh__/__SH__ defaults to 1.  No CHECK macro is provided. */
#if defined(__sh5__) || defined(__SH5__)
# define SIMDE_ARCH_SUPERH 5
#elif defined(__sh4__) || defined(__SH4__)
# define SIMDE_ARCH_SUPERH 4
#elif defined(__sh3__) || defined(__SH3__)
# define SIMDE_ARCH_SUPERH 3
#elif defined(__sh2__) || defined(__SH2__)
# define SIMDE_ARCH_SUPERH 2
#elif defined(__sh1__) || defined(__SH1__)
# define SIMDE_ARCH_SUPERH 1
#elif defined(__sh__) || defined(__SH__)
# define SIMDE_ARCH_SUPERH 1
#endif
/* IBM System z
<https://en.wikipedia.org/wiki/IBM_System_z> */
/* NOTE(review): assumes __ARCH__ is defined by the compiler whenever one
 * of these System/370 / s390x macros is — TODO confirm for non-IBM
 * toolchains. */
#if defined(__370__) || defined(__THW_370__) || defined(__s390__) || defined(__s390x__) || defined(__zarch__) || defined(__SYSC_ZARCH__)
# define SIMDE_ARCH_ZARCH __ARCH__
#endif
#if defined(SIMDE_ARCH_ZARCH)
#define SIMDE_ARCH_ZARCH_CHECK(version) ((version) <= SIMDE_ARCH_ZARCH)
#else
#define SIMDE_ARCH_ZARCH_CHECK(version) (0)
#endif
/* z vector extension: requires both z/Arch detection and __VEC__. */
#if defined(SIMDE_ARCH_ZARCH) && defined(__VEC__)
#define SIMDE_ARCH_ZARCH_ZVECTOR SIMDE_ARCH_ZARCH
#endif
/* TMS320 DSP
<https://en.wikipedia.org/wiki/Texas_Instruments_TMS320> */
/* The value encodes the DSP family; "_PLUS" variants get base+1 (e.g.
 * C6700+ -> 6701) so they compare greater than the base family. */
#if defined(_TMS320C6740) || defined(__TMS320C6740__)
# define SIMDE_ARCH_TMS320 6740
#elif defined(_TMS320C6700_PLUS) || defined(__TMS320C6700_PLUS__)
# define SIMDE_ARCH_TMS320 6701
#elif defined(_TMS320C6700) || defined(__TMS320C6700__)
# define SIMDE_ARCH_TMS320 6700
#elif defined(_TMS320C6600) || defined(__TMS320C6600__)
# define SIMDE_ARCH_TMS320 6600
#elif defined(_TMS320C6400_PLUS) || defined(__TMS320C6400_PLUS__)
# define SIMDE_ARCH_TMS320 6401
#elif defined(_TMS320C6400) || defined(__TMS320C6400__)
# define SIMDE_ARCH_TMS320 6400
#elif defined(_TMS320C6200) || defined(__TMS320C6200__)
# define SIMDE_ARCH_TMS320 6200
#elif defined(_TMS320C55X) || defined(__TMS320C55X__)
# define SIMDE_ARCH_TMS320 550
#elif defined(_TMS320C54X) || defined(__TMS320C54X__)
# define SIMDE_ARCH_TMS320 540
#elif defined(_TMS320C28X) || defined(__TMS320C28X__)
# define SIMDE_ARCH_TMS320 280
#endif
#if defined(SIMDE_ARCH_TMS320)
#define SIMDE_ARCH_TMS320_CHECK(version) ((version) <= SIMDE_ARCH_TMS320)
#else
#define SIMDE_ARCH_TMS320_CHECK(version) (0)
#endif
/* WebAssembly */
#if defined(__wasm__)
# define SIMDE_ARCH_WASM 1
#endif
/* WebAssembly 128-bit SIMD support (requires __wasm_simd128__). */
#if defined(SIMDE_ARCH_WASM) && defined(__wasm_simd128__)
# define SIMDE_ARCH_WASM_SIMD128
#endif
/* Xtensa
   <https://en.wikipedia.org/wiki/Tensilica#Xtensa> */
/* Defined when targeting Tensilica Xtensa; no version encoding. */
#if defined(__xtensa__) || defined(__XTENSA__)
# define SIMDE_ARCH_XTENSA 1
#endif
#endif /* !defined(SIMDE_ARCH_H) */
/* :: End simde-arch.h :: */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-features.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
/* simde-arch.h is used to determine which features are available according
to the compiler. However, we want to make it possible to forcibly enable
or disable APIs */
#if !defined(SIMDE_FEATURES_H)
#define SIMDE_FEATURES_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-diagnostic.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
/* SIMDe targets a very wide range of standards and compilers, and our
* goal is to compile cleanly even with extremely aggressive warnings
* (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.)
* treated as errors.
*
* While our preference is to resolve the underlying issue a given
* diagnostic is warning us about, sometimes that's not possible.
* Fixing a warning in one compiler may cause problems in another.
* Sometimes a warning doesn't really apply to us (false positives),
* and sometimes adhering to a warning would mean dropping a feature
* we *know* the compiler supports since we have tested specifically
* for the compiler or feature.
*
* When practical, warnings are only disabled for specific code. For
* a list of warnings which are enabled by default in all SIMDe code,
* see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS. Note that we restore the
* warning stack when SIMDe is done parsing, so code which includes
* SIMDe is not deprived of these warnings.
*/
#if !defined(SIMDE_DIAGNOSTIC_H)
#define SIMDE_DIAGNOSTIC_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* This is only to help us implement functions like _mm_undefined_ps. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
#undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if HEDLEY_HAS_WARNING("-Wuninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,2,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_PGI_VERSION_CHECK(19,10,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)")
#elif \
HEDLEY_TI_VERSION_CHECK(16,9,9) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ __pragma(warning(disable:4700))
#endif
/* GCC emits a lot of "notes" about the ABI being different for things
* in newer versions of GCC. We don't really care because all our
* functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif
/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
* after each MMX function before any floating point instructions.
* Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty(). However, since SIMDe is implementing the
* MMX API we shouldn't be calling _mm_empty(); we leave it to the
* caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif
/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
* emit a diagnostic if you use #pragma simd instead of
* #pragma omp simd. SIMDe supports OpenMP SIMD, you just need to
* compile with -qopenmp or -qopenmp-simd and define
* SIMDE_ENABLE_OPENMP. Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif
/* MSVC emits a diagnostic when we call a function (like
* simde_mm_set_epi32) while initializing a struct. We currently do
* this a *lot* in the tests. */
#if \
defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif
/* This warning needs a lot of work. It is triggered if all you do is
* pass the value to memcpy/__builtin_memcpy, or if you initialize a
* member of the union, even if that member takes up the entire union.
* Last tested with clang-10, hopefully things will improve in the
* future; if clang fixes this I'd love to enable it. */
#if \
HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif
/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * will be false. However, SIMDe uses these operations exclusively
* for things like _mm_cmpeq_ps, for which we really do want to check
* for equality (or inequality).
*
* If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code to disable this diagnostic I'd
* be happy to accept it. */
#if \
HEDLEY_HAS_WARNING("-Wfloat-equal") || \
HEDLEY_GCC_VERSION_CHECK(3,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif
/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
* If Hedley can't find an implementation it will preprocess to
* nothing, which means there will be a trailing semi-colon. */
#if HEDLEY_HAS_WARNING("-Wextra-semi")
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"")
#elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_
#endif
/* We do use a few variadic macros, which technically aren't available
* until C99 and C++11, but every compiler I'm aware of has supported
* them for much longer. That said, usage is isolated to the test
* suite and compilers known to support them. */
#if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0)
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \
_Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \
_Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"")
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_
#endif
/* emscripten requires us to use a __wasm_unimplemented_simd128__ macro
* before we can access certain SIMD intrinsics, but this diagnostic
* warns about it being a reserved name. It is a reserved name, but
* it's reserved for the compiler and we are using it to convey
* information to the compiler.
*
* This is also used when enabling native aliases since we don't get to
* choose the macro names. */
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#endif
/* Similar to above; types like simde__m128i are reserved due to the
* double underscore, but we didn't choose them, Intel did. */
#if HEDLEY_HAS_WARNING("-Wreserved-identifier")
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ _Pragma("clang diagnostic ignored \"-Wreserved-identifier\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_
#endif
/* clang 3.8 warns about the packed attribute being unnecessary when
* used in the _mm_loadu_* functions. That *may* be true for version
* 3.8, but for later versions it is crucial in order to make unaligned
* access safe. */
#if HEDLEY_HAS_WARNING("-Wpacked")
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_
#endif
/* Triggered when assigning a float to a double implicitly. We use
* explicit casts in SIMDe, this is only used in the test suite. */
#if HEDLEY_HAS_WARNING("-Wdouble-promotion")
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_
#endif
/* Several compilers treat conformant array parameters as VLAs. We
* test to make sure we're in C mode (C++ doesn't support CAPs), and
* that the version of the standard supports CAPs. We also reject
* some buggy compilers like MSVC (the logic is in Hedley if you want
* to take a look), but with certain warnings enabled some compilers
* still like to emit a diagnostic. */
#if HEDLEY_HAS_WARNING("-Wvla")
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_
#endif
/* If you add an unused attribute to a function and don't use it, clang
* may emit this. */
#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif
#if HEDLEY_HAS_WARNING("-Wpass-failed")
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif
#if HEDLEY_HAS_WARNING("-Wpadded")
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif
#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif
#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif
/* clang will emit this warning when we use C99 extensions when not in
* C99 mode, even though it does support this. In such cases we check
* the compiler and version first, so we know it's not a problem. */
#if HEDLEY_HAS_WARNING("-Wc99-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif
/* https://github.com/simd-everywhere/simde/issues/277 */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif
/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
* to silence, but you have to do that before including anything and
* that would require reordering includes. */
#if defined(_MSC_VER)
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif
/* Some compilers, such as clang, may use `long long` for 64-bit
* integers, but `long long` triggers a diagnostic with
* -Wc++98-compat-pedantic which says 'long long' is incompatible with
* C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
_Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \
_Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif
/* Same problem as above */
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_
#endif
/* emscripten emits this whenever stdin/stdout/stderr is used in a
* macro. */
#if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion")
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_
#endif
/* Clang uses C11 generic selections to implement some AltiVec
* functions, which triggers this diagnostic when not compiling
* in C11 mode */
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
#endif
/* Clang sometimes triggers this warning in macros in the AltiVec and
* NEON headers, or due to missing functions. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion")
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"")
/* For NEON, the situation with -Wvector-conversion in clang < 10 is
* bad enough that we just disable the warning altogether. On x86,
* clang has similar issues on several sse4.2+ intrinsics before 3.8. */
#if \
(defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)) || \
SIMDE_DETECT_CLANG_VERSION_NOT(3,8,0)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_
#endif
/* Prior to 5.0, clang didn't support disabling diagnostics in
* statement exprs. As a result, some macros we use don't
* properly silence warnings. */
#if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") && HEDLEY_HAS_WARNING("-Wcast-align")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-align")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_
#endif
/* SLEEF triggers this a *lot* in their headers */
#if HEDLEY_HAS_WARNING("-Wignored-qualifiers")
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_
#endif
/* GCC emits this under some circumstances when using __int128 */
#if HEDLEY_GCC_VERSION_CHECK(4,8,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_
#endif
/* MSVC doesn't like (__assume(0), code) and will warn about code being
* unreachable, but we want it there because not all compilers
* understand the unreachable macro and will complain if it is missing.
* I'm planning on adding a new macro to Hedley to handle this a bit
* more elegantly, but until then... */
#if defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_
#endif
/* This is a false positive from GCC in a few places. */
#if HEDLEY_GCC_VERSION_CHECK(4,7,0)
#define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
#if defined(SIMDE_ENABLE_NATIVE_ALIASES)
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#else
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_
#endif
/* Some native functions on E2K with instruction set < v6 are declared
* as deprecated due to inefficiency. Still they are more efficient
* than SIMDe implementation. So we're using them, and switching off
* these deprecation warnings. */
#if defined(HEDLEY_MCST_LCC_VERSION)
# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS _Pragma("diag_suppress 1215,1444")
# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS _Pragma("diag_default 1215,1444")
#else
# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS
# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS
#endif
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \
HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION \
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \
SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \
SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \
SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \
SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \
SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \
SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \
SIMDE_DIAGNOSTIC_DISABLE_VLA_ \
SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ \
SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_
#endif /* !defined(SIMDE_DIAGNOSTIC_H) */
/* :: End simde-diagnostic.h :: */
#if !defined(SIMDE_X86_SVML_NATIVE) && !defined(SIMDE_X86_SVML_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SVML)
#define SIMDE_X86_SVML_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SVML_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && !defined(SIMDE_X86_AVX512VP2INTERSECT_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VP2INTERSECT)
#define SIMDE_X86_AVX512VP2INTERSECT_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VP2INTERSECT_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && !defined(SIMDE_X86_AVX512VPOPCNTDQ_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VPOPCNTDQ)
#define SIMDE_X86_AVX512VPOPCNTDQ_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512BITALG_NATIVE) && !defined(SIMDE_X86_AVX512BITALG_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512BITALG)
#define SIMDE_X86_AVX512BITALG_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512BITALG_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VBMI_NATIVE) && !defined(SIMDE_X86_AVX512VBMI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VBMI)
#define SIMDE_X86_AVX512VBMI_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VBMI_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VBMI2_NATIVE) && !defined(SIMDE_X86_AVX512VBMI2_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VBMI2)
#define SIMDE_X86_AVX512VBMI2_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VBMI2_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VNNI_NATIVE) && !defined(SIMDE_X86_AVX512VNNI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VNNI)
#define SIMDE_X86_AVX512VNNI_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VNNI_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX5124VNNIW_NATIVE) && !defined(SIMDE_X86_AVX5124VNNIW_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX5124VNNIW)
#define SIMDE_X86_AVX5124VNNIW_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX5124VNNIW_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512CD_NATIVE) && !defined(SIMDE_X86_AVX512CD_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512CD)
#define SIMDE_X86_AVX512CD_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512CD_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512DQ_NATIVE) && !defined(SIMDE_X86_AVX512DQ_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512DQ)
#define SIMDE_X86_AVX512DQ_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512DQ_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512VL_NATIVE) && !defined(SIMDE_X86_AVX512VL_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512VL)
#define SIMDE_X86_AVX512VL_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512VL_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(SIMDE_X86_AVX512BW_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512BW)
#define SIMDE_X86_AVX512BW_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512BF16_NATIVE) && !defined(SIMDE_X86_AVX512BF16_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512BF16)
#define SIMDE_X86_AVX512BF16_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512BF16_NATIVE) && !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#if !defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_X86_AVX512F_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX512F)
#define SIMDE_X86_AVX512F_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX512F_NATIVE) && !defined(SIMDE_X86_AVX2_NATIVE)
#define SIMDE_X86_AVX2_NATIVE
#endif
#if !defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_X86_FMA_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_FMA)
#define SIMDE_X86_FMA_NATIVE
#endif
#endif
#if defined(SIMDE_X86_FMA_NATIVE) && !defined(SIMDE_X86_AVX_NATIVE)
#define SIMDE_X86_AVX_NATIVE
#endif
#if !defined(SIMDE_X86_AVX2_NATIVE) && !defined(SIMDE_X86_AVX2_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX2)
#define SIMDE_X86_AVX2_NATIVE
#endif
#endif
#if defined(SIMDE_X86_AVX2_NATIVE) && !defined(SIMDE_X86_AVX_NATIVE)
#define SIMDE_X86_AVX_NATIVE
#endif
#if !defined(SIMDE_X86_AVX_NATIVE) && !defined(SIMDE_X86_AVX_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_AVX)
#define SIMDE_X86_AVX_NATIVE
#endif
#endif
/* AVX implies SSE4.2. Guard on the macro we are about to define
 * (SSE4_2, not SSE4_1) so this cascade matches the pattern used by
 * every other implied-feature block in this header. */
#if defined(SIMDE_X86_AVX_NATIVE) && !defined(SIMDE_X86_SSE4_2_NATIVE)
#define SIMDE_X86_SSE4_2_NATIVE
#endif
#if !defined(SIMDE_X86_XOP_NATIVE) && !defined(SIMDE_X86_XOP_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_XOP)
#define SIMDE_X86_XOP_NATIVE
#endif
#endif
#if defined(SIMDE_X86_XOP_NATIVE) && !defined(SIMDE_X86_SSE4_2_NATIVE)
#define SIMDE_X86_SSE4_2_NATIVE
#endif
#if !defined(SIMDE_X86_SSE4_2_NATIVE) && !defined(SIMDE_X86_SSE4_2_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSE4_2)
#define SIMDE_X86_SSE4_2_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SSE4_2_NATIVE) && !defined(SIMDE_X86_SSE4_1_NATIVE)
#define SIMDE_X86_SSE4_1_NATIVE
#endif
#if !defined(SIMDE_X86_SSE4_1_NATIVE) && !defined(SIMDE_X86_SSE4_1_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSE4_1)
#define SIMDE_X86_SSE4_1_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SSE4_1_NATIVE) && !defined(SIMDE_X86_SSSE3_NATIVE)
#define SIMDE_X86_SSSE3_NATIVE
#endif
#if !defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_X86_SSSE3_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSSE3)
#define SIMDE_X86_SSSE3_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SSSE3_NATIVE) && !defined(SIMDE_X86_SSE3_NATIVE)
#define SIMDE_X86_SSE3_NATIVE
#endif
#if !defined(SIMDE_X86_SSE3_NATIVE) && !defined(SIMDE_X86_SSE3_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSE3)
#define SIMDE_X86_SSE3_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SSE3_NATIVE) && !defined(SIMDE_X86_SSE2_NATIVE)
#define SIMDE_X86_SSE2_NATIVE
#endif
#if !defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_X86_SSE2_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSE2)
#define SIMDE_X86_SSE2_NATIVE
#endif
#endif
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_X86_SSE_NATIVE)
#define SIMDE_X86_SSE_NATIVE
#endif
#if !defined(SIMDE_X86_SSE_NATIVE) && !defined(SIMDE_X86_SSE_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_SSE)
#define SIMDE_X86_SSE_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_MMX_NATIVE) && !defined(SIMDE_X86_MMX_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_MMX)
#define SIMDE_X86_MMX_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_GFNI_NATIVE) && !defined(SIMDE_X86_GFNI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_GFNI)
#define SIMDE_X86_GFNI_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_PCLMUL_NATIVE) && !defined(SIMDE_X86_PCLMUL_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_PCLMUL)
#define SIMDE_X86_PCLMUL_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_VPCLMULQDQ_NATIVE) && !defined(SIMDE_X86_VPCLMULQDQ_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_VPCLMULQDQ)
#define SIMDE_X86_VPCLMULQDQ_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_F16C_NATIVE) && !defined(SIMDE_X86_F16C_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_X86_F16C)
#define SIMDE_X86_F16C_NATIVE
#endif
#endif
#if !defined(SIMDE_X86_SVML_NATIVE) && !defined(SIMDE_X86_SVML_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(__INTEL_COMPILER)
#define SIMDE_X86_SVML_NATIVE
#endif
#endif
#if defined(HEDLEY_MSVC_VERSION)
#pragma warning(push)
#pragma warning(disable:4799)
#endif
#if \
defined(SIMDE_X86_AVX_NATIVE) || defined(SIMDE_X86_GFNI_NATIVE)
#include <immintrin.h>
#elif defined(SIMDE_X86_SSE4_2_NATIVE)
#include <nmmintrin.h>
#elif defined(SIMDE_X86_SSE4_1_NATIVE)
#include <smmintrin.h>
#elif defined(SIMDE_X86_SSSE3_NATIVE)
#include <tmmintrin.h>
#elif defined(SIMDE_X86_SSE3_NATIVE)
#include <pmmintrin.h>
#elif defined(SIMDE_X86_SSE2_NATIVE)
#include <emmintrin.h>
#elif defined(SIMDE_X86_SSE_NATIVE)
#include <xmmintrin.h>
#elif defined(SIMDE_X86_MMX_NATIVE)
#include <mmintrin.h>
#endif
#if defined(SIMDE_X86_XOP_NATIVE)
#if defined(_MSC_VER)
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
#if defined(HEDLEY_MSVC_VERSION)
#pragma warning(pop)
#endif
#if !defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_ARM_NEON) && defined(SIMDE_ARCH_AARCH64) && SIMDE_ARCH_ARM_CHECK(8,0)
#define SIMDE_ARM_NEON_A64V8_NATIVE
#endif
#endif
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && !defined(SIMDE_ARM_NEON_A32V8_NATIVE)
#define SIMDE_ARM_NEON_A32V8_NATIVE
#endif
#if !defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_ARM_NEON_A32V8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(8,0) && (__ARM_NEON_FP & 0x02)
#define SIMDE_ARM_NEON_A32V8_NATIVE
#endif
#endif
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_ARM_NEON_A32V7_NATIVE
#endif
#if !defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A32V7_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_ARM_NEON) && SIMDE_ARCH_ARM_CHECK(7,0)
#define SIMDE_ARM_NEON_A32V7_NATIVE
#endif
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#include <arm_neon.h>
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <arm_fp16.h>
#endif
#endif
#if !defined(SIMDE_ARM_SVE_NATIVE) && !defined(SIMDE_ARM_SVE_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_ARM_SVE)
#define SIMDE_ARM_SVE_NATIVE
#include <arm_sve.h>
#endif
#endif
#if !defined(SIMDE_WASM_SIMD128_NATIVE) && !defined(SIMDE_WASM_SIMD128_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_WASM_SIMD128)
#define SIMDE_WASM_SIMD128_NATIVE
#endif
#endif
#if defined(SIMDE_WASM_SIMD128_NATIVE)
#include <wasm_simd128.h>
#endif
#if !defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P9_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_POWER_ALTIVEC_CHECK(900)
#define SIMDE_POWER_ALTIVEC_P9_NATIVE
#endif
#endif
/* POWER9 AltiVec implies POWER8 AltiVec. Guard on the *_NATIVE macro
 * actually defined below (SIMDE_POWER_ALTIVEC_P8 is never defined
 * anywhere, so the old guard was always true). */
#if defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
#define SIMDE_POWER_ALTIVEC_P8_NATIVE
#endif
#if !defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P8_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_POWER_ALTIVEC_CHECK(800)
#define SIMDE_POWER_ALTIVEC_P8_NATIVE
#endif
#endif
/* POWER8 AltiVec implies POWER7 AltiVec. Guard on the *_NATIVE macro
 * actually defined below (SIMDE_POWER_ALTIVEC_P7 is never defined
 * anywhere, so the old guard was always true). */
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
#define SIMDE_POWER_ALTIVEC_P7_NATIVE
#endif
#if !defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P7_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_POWER_ALTIVEC_CHECK(700)
#define SIMDE_POWER_ALTIVEC_P7_NATIVE
#endif
#endif
/* POWER7 AltiVec implies POWER6 AltiVec. Guard on the *_NATIVE macro
 * actually defined below (SIMDE_POWER_ALTIVEC_P6 is never defined
 * anywhere, so the old guard was always true). */
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define SIMDE_POWER_ALTIVEC_P6_NATIVE
#endif
#if !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P6_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_POWER_ALTIVEC_CHECK(600)
#define SIMDE_POWER_ALTIVEC_P6_NATIVE
#endif
#endif
/* POWER6 AltiVec implies POWER5 AltiVec. Guard on the *_NATIVE macro
 * actually defined below (SIMDE_POWER_ALTIVEC_P5 is never defined
 * anywhere, so the old guard was always true). */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
#define SIMDE_POWER_ALTIVEC_P5_NATIVE
#endif
#if !defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) && !defined(SIMDE_POWER_ALTIVEC_P5_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_POWER_ALTIVEC_CHECK(500)
#define SIMDE_POWER_ALTIVEC_P5_NATIVE
#endif
#endif
#if !defined(SIMDE_ZARCH_ZVECTOR_15_NATIVE) && !defined(SIMDE_ZARCH_ZVECTOR_15_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_ZARCH_CHECK(13) && defined(SIMDE_ARCH_ZARCH_ZVECTOR)
#define SIMDE_ZARCH_ZVECTOR_15_NATIVE
#endif
#endif
#if !defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE) && !defined(SIMDE_ZARCH_ZVECTOR_14_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_ZARCH_CHECK(12) && defined(SIMDE_ARCH_ZARCH_ZVECTOR)
#define SIMDE_ZARCH_ZVECTOR_14_NATIVE
#endif
#endif
#if !defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) && !defined(SIMDE_ZARCH_ZVECTOR_13_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if SIMDE_ARCH_ZARCH_CHECK(11) && defined(SIMDE_ARCH_ZARCH_ZVECTOR)
#define SIMDE_ZARCH_ZVECTOR_13_NATIVE
#endif
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* AltiVec conflicts with lots of stuff. The bool keyword conflicts
* with the bool keyword in C++ and the bool macro in C99+ (defined
* in stdbool.h). The vector keyword conflicts with std::vector in
* C++ if you are `using namespace std;`.
*
* Luckily AltiVec allows you to use `__vector`/`__bool`/`__pixel`
* instead, but altivec.h will unconditionally define
* `vector`/`bool`/`pixel` so we need to work around that.
*
* Unfortunately this means that if your code uses AltiVec directly
* it may break. If this is the case you'll want to define
* `SIMDE_POWER_ALTIVEC_NO_UNDEF` before including SIMDe. Or, even
* better, port your code to use the double-underscore versions. */
#if defined(bool)
#undef bool
#endif
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#include <altivec.h>
#if !defined(SIMDE_POWER_ALTIVEC_NO_UNDEF)
#if defined(vector)
#undef vector
#endif
#if defined(pixel)
#undef pixel
#endif
#if defined(bool)
#undef bool
#endif
#endif /* !defined(SIMDE_POWER_ALTIVEC_NO_UNDEF) */
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
#include <vecintrin.h>
#endif
/* Use these instead of vector/pixel/bool in SIMDe. */
#define SIMDE_POWER_ALTIVEC_VECTOR(T) __vector T
#define SIMDE_POWER_ALTIVEC_PIXEL __pixel
#define SIMDE_POWER_ALTIVEC_BOOL __bool
/* Re-define bool if we're using stdbool.h */
#if !defined(__cplusplus) && defined(__bool_true_false_are_defined) && !defined(SIMDE_POWER_ALTIVEC_NO_UNDEF)
#define bool _Bool
#endif
#endif
#if !defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) && !defined(SIMDE_MIPS_LOONGSON_MMI_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_MIPS_LOONGSON_MMI)
#define SIMDE_MIPS_LOONGSON_MMI_NATIVE 1
#endif
#endif
#if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
#include <loongson-mmiintrin.h>
#endif
#if !defined(SIMDE_MIPS_MSA_NATIVE) && !defined(SIMDE_MIPS_MSA_NO_NATIVE) && !defined(SIMDE_NO_NATIVE)
#if defined(SIMDE_ARCH_MIPS_MSA)
#define SIMDE_MIPS_MSA_NATIVE 1
#endif
#endif
#if defined(SIMDE_MIPS_MSA_NATIVE)
#include <msa.h>
#endif
/* This is used to determine whether or not to fall back on a vector
* function in an earlier ISA extensions, as well as whether
* we expected any attempts at vectorization to be fruitful or if we
* expect to always be running serial code. */
#if !defined(SIMDE_NATURAL_VECTOR_SIZE)
#if defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_NATURAL_VECTOR_SIZE (512)
#elif defined(SIMDE_X86_AVX_NATIVE)
#define SIMDE_NATURAL_VECTOR_SIZE (256)
#elif \
defined(SIMDE_X86_SSE_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V7_NATIVE) || \
defined(SIMDE_WASM_SIMD128_NATIVE) || \
defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) || \
defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE) || \
defined(SIMDE_MIPS_MSA_NATIVE)
#define SIMDE_NATURAL_VECTOR_SIZE (128)
#endif
#if !defined(SIMDE_NATURAL_VECTOR_SIZE)
#define SIMDE_NATURAL_VECTOR_SIZE (0)
#endif
#endif
#define SIMDE_NATURAL_VECTOR_SIZE_LE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE <= (x)))
#define SIMDE_NATURAL_VECTOR_SIZE_GE(x) ((SIMDE_NATURAL_VECTOR_SIZE > 0) && (SIMDE_NATURAL_VECTOR_SIZE >= (x)))
/* Native aliases */
#if defined(SIMDE_ENABLE_NATIVE_ALIASES)
#if !defined(SIMDE_X86_MMX_NATIVE)
#define SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSE_NATIVE)
#define SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSE2_NATIVE)
#define SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSE3_NATIVE)
#define SIMDE_X86_SSE3_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSSE3_NATIVE)
#define SIMDE_X86_SSSE3_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSE4_1_NATIVE)
#define SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_SSE4_2_NATIVE)
#define SIMDE_X86_SSE4_2_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX_NATIVE)
#define SIMDE_X86_AVX_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX2_NATIVE)
#define SIMDE_X86_AVX2_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_FMA_NATIVE)
#define SIMDE_X86_FMA_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512F_NATIVE)
#define SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512VL_NATIVE)
#define SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512VBMI_NATIVE)
#define SIMDE_X86_AVX512VBMI_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512VBMI2_NATIVE)
#define SIMDE_X86_AVX512VBMI2_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512BW_NATIVE)
#define SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512VNNI_NATIVE)
#define SIMDE_X86_AVX512VNNI_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX5124VNNIW_NATIVE)
#define SIMDE_X86_AVX5124VNNIW_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512BF16_NATIVE)
#define SIMDE_X86_AVX512BF16_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512BITALG_NATIVE)
#define SIMDE_X86_AVX512BITALG_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512VPOPCNTDQ_NATIVE)
#define SIMDE_X86_AVX512VPOPCNTDQ_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512DQ_NATIVE)
#define SIMDE_X86_AVX512DQ_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_AVX512CD_NATIVE)
#define SIMDE_X86_AVX512CD_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_GFNI_NATIVE)
#define SIMDE_X86_GFNI_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_PCLMUL_NATIVE)
#define SIMDE_X86_PCLMUL_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_VPCLMULQDQ_NATIVE)
#define SIMDE_X86_VPCLMULQDQ_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_X86_F16C_NATIVE)
#define SIMDE_X86_F16C_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_ARM_NEON_A32V8_NATIVE)
#define SIMDE_ARM_NEON_A32V8_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_ARM_SVE_NATIVE)
#define SIMDE_ARM_SVE_ENABLE_NATIVE_ALIASES
#endif
#if !defined(SIMDE_WASM_SIMD128_NATIVE)
#define SIMDE_WASM_SIMD128_ENABLE_NATIVE_ALIASES
#endif
#endif
/* Are floating point values stored using IEEE 754? Knowing
 * this during preprocessing is a bit tricky, mostly because what
 * we're curious about is how values are stored and not whether the
 * implementation is fully conformant in terms of rounding, NaN
 * handling, etc.
 *
 * For example, if you use -ffast-math or -Ofast on
 * GCC or clang, IEEE 754 isn't strictly followed, therefore IEEE 754
 * support is not advertised (by defining __STDC_IEC_559__).
 *
 * However, what we care about is whether it is safe to assume that
 * floating point values are stored in IEEE 754 format, in which case
 * we can provide faster implementations of some functions.
 *
 * Luckily every vaguely modern architecture I'm aware of uses IEEE 754,
 * so we just assume IEEE 754 for now. There is a test which verifies
 * this; if that test fails somewhere please let us know and we'll add
 * an exception for that platform. Meanwhile, you can define
 * SIMDE_NO_IEEE754_STORAGE. */
/* Honor the documented opt-out macro SIMDE_NO_IEEE754_STORAGE (the
 * comment above tells users to define that name), while continuing to
 * accept the historical misspelling SIMDE_NO_IEE754_STORAGE for
 * backward compatibility. */
#if !defined(SIMDE_IEEE754_STORAGE) && !defined(SIMDE_NO_IEEE754_STORAGE) && !defined(SIMDE_NO_IEE754_STORAGE)
#define SIMDE_IEEE754_STORAGE
#endif
#endif /* !defined(SIMDE_FEATURES_H) */
/* :: End simde-features.h :: */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-math.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
/* Attempt to find math functions. Functions may be in <cmath>,
* <math.h>, compiler built-ins/intrinsics, or platform/architecture
* specific headers. In some cases, especially those not built in to
* libm, we may need to define our own implementations. */
#if !defined(SIMDE_MATH_H)
#define SIMDE_MATH_H 1
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
#include <stdint.h>
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#include <arm_neon.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
/* SLEEF support
* https://sleef.org/
*
* If you include <sleef.h> prior to including SIMDe, SIMDe will use
* SLEEF. You can also define SIMDE_MATH_SLEEF_ENABLE prior to
* including SIMDe to force the issue.
*
* Note that SLEEF requires linking to libsleef.
*
* By default, SIMDe will use the 1 ULP functions, but if you use
* SIMDE_ACCURACY_PREFERENCE of 0 we will use up to 4 ULP. This is
* only the case for the simde_math_* functions; for code in other
* SIMDe headers which calls SLEEF directly we may use functions with
* greater error if the API we're implementing is less precise (for
* example, SVML guarantees 4 ULP, so we will generally use the 3.5
* ULP functions from SLEEF). */
#if !defined(SIMDE_MATH_SLEEF_DISABLE)
#if defined(__SLEEF_H__)
#define SIMDE_MATH_SLEEF_ENABLE
#endif
#endif
#if defined(SIMDE_MATH_SLEEF_ENABLE) && !defined(__SLEEF_H__)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_
#include <sleef.h>
HEDLEY_DIAGNOSTIC_POP
#endif
#if defined(SIMDE_MATH_SLEEF_ENABLE) && defined(__SLEEF_H__)
#if defined(SLEEF_VERSION_MAJOR)
#define SIMDE_MATH_SLEEF_VERSION_CHECK(major, minor, patch) (HEDLEY_VERSION_ENCODE(SLEEF_VERSION_MAJOR, SLEEF_VERSION_MINOR, SLEEF_VERSION_PATCHLEVEL) >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define SIMDE_MATH_SLEEF_VERSION_CHECK(major, minor, patch) (HEDLEY_VERSION_ENCODE(3,0,0) >= HEDLEY_VERSION_ENCODE(major, minor, patch))
#endif
#else
#define SIMDE_MATH_SLEEF_VERSION_CHECK(major, minor, patch) (0)
#endif
#if defined(__has_builtin)
#define SIMDE_MATH_BUILTIN_LIBM(func) __has_builtin(__builtin_##func)
#elif \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(4,4,0)
#define SIMDE_MATH_BUILTIN_LIBM(func) (1)
#else
#define SIMDE_MATH_BUILTIN_LIBM(func) (0)
#endif
#if defined(HUGE_VAL)
/* Looks like <math.h> or <cmath> has already been included. */
/* The math.h from libc++ (yes, the C header from the C++ standard
* library) will define an isnan function, but not an isnan macro
* like the C standard requires. So we detect the header guards
* macro libc++ uses. */
#if defined(isnan) || (defined(_LIBCPP_MATH_H) && !defined(_LIBCPP_CMATH))
#define SIMDE_MATH_HAVE_MATH_H
#elif defined(__cplusplus)
#define SIMDE_MATH_HAVE_CMATH
#endif
#elif defined(__has_include)
#if defined(__cplusplus) && (__cplusplus >= 201103L) && __has_include(<cmath>)
#define SIMDE_MATH_HAVE_CMATH
#include <cmath>
#elif __has_include(<math.h>)
#define SIMDE_MATH_HAVE_MATH_H
#include <math.h>
#elif !defined(SIMDE_MATH_NO_LIBM)
#define SIMDE_MATH_NO_LIBM
#endif
#elif !defined(SIMDE_MATH_NO_LIBM)
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define SIMDE_MATH_HAVE_CMATH
HEDLEY_DIAGNOSTIC_PUSH
#if defined(HEDLEY_MSVC_VERSION)
/* VS 14 emits this diagnostic about noexcept being used on a
* <cmath> function, which we can't do anything about. */
#pragma warning(disable:4996)
#endif
#include <cmath>
HEDLEY_DIAGNOSTIC_POP
#else
#define SIMDE_MATH_HAVE_MATH_H
#include <math.h>
#endif
#endif
#if !defined(SIMDE_MATH_INFINITY)
#if \
HEDLEY_HAS_BUILTIN(__builtin_inf) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0)
#define SIMDE_MATH_INFINITY (__builtin_inf())
#elif defined(INFINITY)
#define SIMDE_MATH_INFINITY INFINITY
#endif
#endif
#if !defined(SIMDE_INFINITYF)
#if \
HEDLEY_HAS_BUILTIN(__builtin_inff) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0)
#define SIMDE_MATH_INFINITYF (__builtin_inff())
#elif defined(INFINITYF)
#define SIMDE_MATH_INFINITYF INFINITYF
#elif defined(SIMDE_MATH_INFINITY)
#define SIMDE_MATH_INFINITYF HEDLEY_STATIC_CAST(float, SIMDE_MATH_INFINITY)
#endif
#endif
#if !defined(SIMDE_MATH_NAN)
#if \
HEDLEY_HAS_BUILTIN(__builtin_nan) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0)
#define SIMDE_MATH_NAN (__builtin_nan(""))
#elif defined(NAN)
#define SIMDE_MATH_NAN NAN
#endif
#endif
#if !defined(SIMDE_NANF)
#if \
HEDLEY_HAS_BUILTIN(__builtin_nanf) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0)
#define SIMDE_MATH_NANF (__builtin_nanf(""))
#elif defined(NANF)
#define SIMDE_MATH_NANF NANF
#elif defined(SIMDE_MATH_NAN)
#define SIMDE_MATH_NANF HEDLEY_STATIC_CAST(float, SIMDE_MATH_NAN)
#endif
#endif
#if !defined(SIMDE_MATH_PI)
#if defined(M_PI)
#define SIMDE_MATH_PI M_PI
#else
#define SIMDE_MATH_PI 3.14159265358979323846
#endif
#endif
#if !defined(SIMDE_MATH_PIF)
#if defined(M_PI)
#define SIMDE_MATH_PIF HEDLEY_STATIC_CAST(float, M_PI)
#else
#define SIMDE_MATH_PIF 3.14159265358979323846f
#endif
#endif
#if !defined(SIMDE_MATH_PI_OVER_180)
#define SIMDE_MATH_PI_OVER_180 0.0174532925199432957692369076848861271344287188854172545609719144
#endif
#if !defined(SIMDE_MATH_PI_OVER_180F)
#define SIMDE_MATH_PI_OVER_180F 0.0174532925199432957692369076848861271344287188854172545609719144f
#endif
#if !defined(SIMDE_MATH_180_OVER_PI)
#define SIMDE_MATH_180_OVER_PI 57.295779513082320876798154814105170332405472466564321549160243861
#endif
#if !defined(SIMDE_MATH_180_OVER_PIF)
#define SIMDE_MATH_180_OVER_PIF 57.295779513082320876798154814105170332405472466564321549160243861f
#endif
#if !defined(SIMDE_MATH_FLT_MIN)
#if defined(__FLT_MIN__)
#define SIMDE_MATH_FLT_MIN __FLT_MIN__
#else
#if !defined(FLT_MIN)
#if defined(__cplusplus)
#include <cfloat>
#else
#include <float.h>
#endif
#endif
#define SIMDE_MATH_FLT_MIN FLT_MIN
#endif
#endif
#if !defined(SIMDE_MATH_FLT_MAX)
#if defined(__FLT_MAX__)
#define SIMDE_MATH_FLT_MAX __FLT_MAX__
#else
#if !defined(FLT_MAX)
#if defined(__cplusplus)
#include <cfloat>
#else
#include <float.h>
#endif
#endif
#define SIMDE_MATH_FLT_MAX FLT_MAX
#endif
#endif
#if !defined(SIMDE_MATH_DBL_MIN)
#if defined(__DBL_MIN__)
#define SIMDE_MATH_DBL_MIN __DBL_MIN__
#else
#if !defined(DBL_MIN)
#if defined(__cplusplus)
#include <cfloat>
#else
#include <float.h>
#endif
#endif
#define SIMDE_MATH_DBL_MIN DBL_MIN
#endif
#endif
#if !defined(SIMDE_MATH_DBL_MAX)
#if defined(__DBL_MAX__)
#define SIMDE_MATH_DBL_MAX __DBL_MAX__
#else
#if !defined(DBL_MAX)
#if defined(__cplusplus)
#include <cfloat>
#else
#include <float.h>
#endif
#endif
#define SIMDE_MATH_DBL_MAX DBL_MAX
#endif
#endif
/*** Classification macros from C99 ***/
#if !defined(simde_math_isinf)
#if SIMDE_MATH_BUILTIN_LIBM(isinf)
#define simde_math_isinf(v) __builtin_isinf(v)
#elif defined(isinf) || defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_isinf(v) isinf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isinf(v) std::isinf(v)
#endif
#endif
#if !defined(simde_math_isinff)
#if HEDLEY_HAS_BUILTIN(__builtin_isinff) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0)
#define simde_math_isinff(v) __builtin_isinff(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isinff(v) std::isinf(v)
#elif defined(simde_math_isinf)
#define simde_math_isinff(v) simde_math_isinf(HEDLEY_STATIC_CAST(double, v))
#endif
#endif
#if !defined(simde_math_isnan)
#if SIMDE_MATH_BUILTIN_LIBM(isnan)
#define simde_math_isnan(v) __builtin_isnan(v)
#elif defined(isnan) || defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_isnan(v) isnan(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isnan(v) std::isnan(v)
#endif
#endif
#if !defined(simde_math_isnanf)
#if HEDLEY_HAS_BUILTIN(__builtin_isnanf) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0)
/* XL C/C++ has __builtin_isnan but not __builtin_isnanf */
#define simde_math_isnanf(v) __builtin_isnanf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isnanf(v) std::isnan(v)
#elif defined(simde_math_isnan)
#define simde_math_isnanf(v) simde_math_isnan(HEDLEY_STATIC_CAST(double, v))
#endif
#endif
#if !defined(simde_math_isnormal)
#if SIMDE_MATH_BUILTIN_LIBM(isnormal)
#define simde_math_isnormal(v) __builtin_isnormal(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_isnormal(v) isnormal(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isnormal(v) std::isnormal(v)
#endif
#endif
#if !defined(simde_math_isnormalf)
#if HEDLEY_HAS_BUILTIN(__builtin_isnormalf)
#define simde_math_isnormalf(v) __builtin_isnormalf(v)
#elif SIMDE_MATH_BUILTIN_LIBM(isnormal)
#define simde_math_isnormalf(v) __builtin_isnormal(v)
#elif defined(isnormalf)
#define simde_math_isnormalf(v) isnormalf(v)
#elif defined(isnormal) || defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_isnormalf(v) isnormal(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_isnormalf(v) std::isnormal(v)
#elif defined(simde_math_isnormal)
#define simde_math_isnormalf(v) simde_math_isnormal(v)
#endif
#endif
#if !defined(simde_math_issubnormalf)
#if SIMDE_MATH_BUILTIN_LIBM(fpclassify)
#define simde_math_issubnormalf(v) __builtin_fpclassify(0, 0, 0, 1, 0, v)
#elif defined(fpclassify)
#define simde_math_issubnormalf(v) (fpclassify(v) == FP_SUBNORMAL)
#elif defined(SIMDE_IEEE754_STORAGE)
#define simde_math_issubnormalf(v) (((simde_float32_as_uint32(v) & UINT32_C(0x7F800000)) == UINT32_C(0)) && ((simde_float32_as_uint32(v) & UINT32_C(0x007FFFFF)) != UINT32_C(0)))
#endif
#endif
#if !defined(simde_math_issubnormal)
#if SIMDE_MATH_BUILTIN_LIBM(fpclassify)
#define simde_math_issubnormal(v) __builtin_fpclassify(0, 0, 0, 1, 0, v)
#elif defined(fpclassify)
#define simde_math_issubnormal(v) (fpclassify(v) == FP_SUBNORMAL)
#elif defined(SIMDE_IEEE754_STORAGE)
#define simde_math_issubnormal(v) (((simde_float64_as_uint64(v) & UINT64_C(0x7FF0000000000000)) == UINT64_C(0)) && ((simde_float64_as_uint64(v) & UINT64_C(0x00FFFFFFFFFFFFF)) != UINT64_C(0)))
#endif
#endif
#if defined(FP_NAN)
#define SIMDE_MATH_FP_NAN FP_NAN
#else
#define SIMDE_MATH_FP_NAN 0
#endif
#if defined(FP_INFINITE)
#define SIMDE_MATH_FP_INFINITE FP_INFINITE
#else
#define SIMDE_MATH_FP_INFINITE 1
#endif
#if defined(FP_ZERO)
#define SIMDE_MATH_FP_ZERO FP_ZERO
#else
#define SIMDE_MATH_FP_ZERO 2
#endif
#if defined(FP_SUBNORMAL)
#define SIMDE_MATH_FP_SUBNORMAL FP_SUBNORMAL
#else
#define SIMDE_MATH_FP_SUBNORMAL 3
#endif
#if defined(FP_NORMAL)
#define SIMDE_MATH_FP_NORMAL FP_NORMAL
#else
#define SIMDE_MATH_FP_NORMAL 4
#endif
/* Classify a single-precision value into the SIMDE_MATH_FP_* categories
 * (same contract as C99 fpclassify). Prefers the compiler builtin, then
 * the <math.h> macro; otherwise composes the simde_math_is*f helpers. */
static HEDLEY_INLINE
int
simde_math_fpclassifyf(float v) {
#if SIMDE_MATH_BUILTIN_LIBM(fpclassify)
  return __builtin_fpclassify(SIMDE_MATH_FP_NAN, SIMDE_MATH_FP_INFINITE, SIMDE_MATH_FP_NORMAL, SIMDE_MATH_FP_SUBNORMAL, SIMDE_MATH_FP_ZERO, v);
#elif defined(fpclassify)
  return fpclassify(v);
#else
  /* Check order matters: normal first, then zero, NaN, and infinity;
   * anything remaining must be subnormal. */
  if (simde_math_isnormalf(v))
    return SIMDE_MATH_FP_NORMAL;
  if (v == 0.0f)
    return SIMDE_MATH_FP_ZERO;
  if (simde_math_isnanf(v))
    return SIMDE_MATH_FP_NAN;
  if (simde_math_isinff(v))
    return SIMDE_MATH_FP_INFINITE;
  return SIMDE_MATH_FP_SUBNORMAL;
#endif
}
/* Classify a double-precision value into the SIMDE_MATH_FP_* categories
 * (same contract as C99 fpclassify). Prefers the compiler builtin, then
 * the <math.h> macro; otherwise composes the simde_math_is* helpers. */
static HEDLEY_INLINE
int
simde_math_fpclassify(double v) {
#if SIMDE_MATH_BUILTIN_LIBM(fpclassify)
  return __builtin_fpclassify(SIMDE_MATH_FP_NAN, SIMDE_MATH_FP_INFINITE, SIMDE_MATH_FP_NORMAL, SIMDE_MATH_FP_SUBNORMAL, SIMDE_MATH_FP_ZERO, v);
#elif defined(fpclassify)
  return fpclassify(v);
#else
  /* Check order matters: normal first, then zero, NaN, and infinity;
   * anything remaining must be subnormal. */
  if (simde_math_isnormal(v))
    return SIMDE_MATH_FP_NORMAL;
  if (v == 0.0)
    return SIMDE_MATH_FP_ZERO;
  if (simde_math_isnan(v))
    return SIMDE_MATH_FP_NAN;
  if (simde_math_isinf(v))
    return SIMDE_MATH_FP_INFINITE;
  return SIMDE_MATH_FP_SUBNORMAL;
#endif
}
/*** Manipulation functions ***/
#if !defined(simde_math_nextafter)
#if \
(HEDLEY_HAS_BUILTIN(__builtin_nextafter) && !defined(HEDLEY_IBM_VERSION)) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define simde_math_nextafter(x, y) __builtin_nextafter(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_nextafter(x, y) std::nextafter(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_nextafter(x, y) nextafter(x, y)
#endif
#endif
#if !defined(simde_math_nextafterf)
#if \
(HEDLEY_HAS_BUILTIN(__builtin_nextafterf) && !defined(HEDLEY_IBM_VERSION)) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define simde_math_nextafterf(x, y) __builtin_nextafterf(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_nextafterf(x, y) std::nextafter(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_nextafterf(x, y) nextafterf(x, y)
#endif
#endif
/*** Functions from C99 ***/
#if !defined(simde_math_abs)
#if SIMDE_MATH_BUILTIN_LIBM(abs)
#define simde_math_abs(v) __builtin_abs(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_abs(v) std::abs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_abs(v) abs(v)
#endif
#endif
#if !defined(simde_math_labs)
#if SIMDE_MATH_BUILTIN_LIBM(labs)
#define simde_math_labs(v) __builtin_labs(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_labs(v) std::labs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_labs(v) labs(v)
#endif
#endif
#if !defined(simde_math_llabs)
#if SIMDE_MATH_BUILTIN_LIBM(llabs)
#define simde_math_llabs(v) __builtin_llabs(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_llabs(v) std::llabs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_llabs(v) llabs(v)
#endif
#endif
#if !defined(simde_math_fabsf)
#if SIMDE_MATH_BUILTIN_LIBM(fabsf)
#define simde_math_fabsf(v) __builtin_fabsf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fabsf(v) std::abs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fabsf(v) fabsf(v)
#endif
#endif
#if !defined(simde_math_acos)
#if SIMDE_MATH_BUILTIN_LIBM(acos)
#define simde_math_acos(v) __builtin_acos(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_acos(v) std::acos(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_acos(v) acos(v)
#endif
#endif
#if !defined(simde_math_acosf)
#if SIMDE_MATH_BUILTIN_LIBM(acosf)
#define simde_math_acosf(v) __builtin_acosf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_acosf(v) std::acos(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_acosf(v) acosf(v)
#endif
#endif
#if !defined(simde_math_acosh)
#if SIMDE_MATH_BUILTIN_LIBM(acosh)
#define simde_math_acosh(v) __builtin_acosh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_acosh(v) std::acosh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_acosh(v) acosh(v)
#endif
#endif
#if !defined(simde_math_acoshf)
#if SIMDE_MATH_BUILTIN_LIBM(acoshf)
#define simde_math_acoshf(v) __builtin_acoshf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_acoshf(v) std::acosh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_acoshf(v) acoshf(v)
#endif
#endif
#if !defined(simde_math_asin)
#if SIMDE_MATH_BUILTIN_LIBM(asin)
#define simde_math_asin(v) __builtin_asin(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_asin(v) std::asin(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_asin(v) asin(v)
#endif
#endif
#if !defined(simde_math_asinf)
#if SIMDE_MATH_BUILTIN_LIBM(asinf)
#define simde_math_asinf(v) __builtin_asinf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_asinf(v) std::asin(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_asinf(v) asinf(v)
#endif
#endif
#if !defined(simde_math_asinh)
#if SIMDE_MATH_BUILTIN_LIBM(asinh)
#define simde_math_asinh(v) __builtin_asinh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_asinh(v) std::asinh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_asinh(v) asinh(v)
#endif
#endif
#if !defined(simde_math_asinhf)
#if SIMDE_MATH_BUILTIN_LIBM(asinhf)
#define simde_math_asinhf(v) __builtin_asinhf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_asinhf(v) std::asinh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_asinhf(v) asinhf(v)
#endif
#endif
#if !defined(simde_math_atan)
#if SIMDE_MATH_BUILTIN_LIBM(atan)
#define simde_math_atan(v) __builtin_atan(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atan(v) std::atan(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atan(v) atan(v)
#endif
#endif
#if !defined(simde_math_atan2)
#if SIMDE_MATH_BUILTIN_LIBM(atan2)
#define simde_math_atan2(y, x) __builtin_atan2(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atan2(y, x) std::atan2(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atan2(y, x) atan2(y, x)
#endif
#endif
#if !defined(simde_math_atan2f)
#if SIMDE_MATH_BUILTIN_LIBM(atan2f)
#define simde_math_atan2f(y, x) __builtin_atan2f(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atan2f(y, x) std::atan2(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atan2f(y, x) atan2f(y, x)
#endif
#endif
#if !defined(simde_math_atanf)
#if SIMDE_MATH_BUILTIN_LIBM(atanf)
#define simde_math_atanf(v) __builtin_atanf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atanf(v) std::atan(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atanf(v) atanf(v)
#endif
#endif
#if !defined(simde_math_atanh)
#if SIMDE_MATH_BUILTIN_LIBM(atanh)
#define simde_math_atanh(v) __builtin_atanh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atanh(v) std::atanh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atanh(v) atanh(v)
#endif
#endif
#if !defined(simde_math_atanhf)
#if SIMDE_MATH_BUILTIN_LIBM(atanhf)
#define simde_math_atanhf(v) __builtin_atanhf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_atanhf(v) std::atanh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_atanhf(v) atanhf(v)
#endif
#endif
#if !defined(simde_math_cbrt)
#if SIMDE_MATH_BUILTIN_LIBM(cbrt)
#define simde_math_cbrt(v) __builtin_cbrt(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_cbrt(v) std::cbrt(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_cbrt(v) cbrt(v)
#endif
#endif
#if !defined(simde_math_cbrtf)
#if SIMDE_MATH_BUILTIN_LIBM(cbrtf)
#define simde_math_cbrtf(v) __builtin_cbrtf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_cbrtf(v) std::cbrt(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_cbrtf(v) cbrtf(v)
#endif
#endif
#if !defined(simde_math_ceil)
#if SIMDE_MATH_BUILTIN_LIBM(ceil)
#define simde_math_ceil(v) __builtin_ceil(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_ceil(v) std::ceil(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_ceil(v) ceil(v)
#endif
#endif
#if !defined(simde_math_ceilf)
#if SIMDE_MATH_BUILTIN_LIBM(ceilf)
#define simde_math_ceilf(v) __builtin_ceilf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_ceilf(v) std::ceil(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_ceilf(v) ceilf(v)
#endif
#endif
#if !defined(simde_math_copysign)
#if SIMDE_MATH_BUILTIN_LIBM(copysign)
#define simde_math_copysign(x, y) __builtin_copysign(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_copysign(x, y) std::copysign(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_copysign(x, y) copysign(x, y)
#endif
#endif
#if !defined(simde_math_copysignf)
#if SIMDE_MATH_BUILTIN_LIBM(copysignf)
#define simde_math_copysignf(x, y) __builtin_copysignf(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_copysignf(x, y) std::copysignf(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_copysignf(x, y) copysignf(x, y)
#endif
#endif
#if !defined(simde_math_cos)
#if SIMDE_MATH_BUILTIN_LIBM(cos)
#define simde_math_cos(v) __builtin_cos(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_cos(v) std::cos(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_cos(v) cos(v)
#endif
#endif
#if !defined(simde_math_cosf)
#if defined(SIMDE_MATH_SLEEF_ENABLE)
#if SIMDE_ACCURACY_PREFERENCE < 1
#define simde_math_cosf(v) Sleef_cosf_u35(v)
#else
#define simde_math_cosf(v) Sleef_cosf_u10(v)
#endif
#elif SIMDE_MATH_BUILTIN_LIBM(cosf)
#define simde_math_cosf(v) __builtin_cosf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_cosf(v) std::cos(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_cosf(v) cosf(v)
#endif
#endif
#if !defined(simde_math_cosh)
#if SIMDE_MATH_BUILTIN_LIBM(cosh)
#define simde_math_cosh(v) __builtin_cosh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_cosh(v) std::cosh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_cosh(v) cosh(v)
#endif
#endif
#if !defined(simde_math_coshf)
#if SIMDE_MATH_BUILTIN_LIBM(coshf)
#define simde_math_coshf(v) __builtin_coshf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_coshf(v) std::cosh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_coshf(v) coshf(v)
#endif
#endif
#if !defined(simde_math_erf)
#if SIMDE_MATH_BUILTIN_LIBM(erf)
#define simde_math_erf(v) __builtin_erf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_erf(v) std::erf(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_erf(v) erf(v)
#endif
#endif
#if !defined(simde_math_erff)
#if SIMDE_MATH_BUILTIN_LIBM(erff)
#define simde_math_erff(v) __builtin_erff(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_erff(v) std::erf(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_erff(v) erff(v)
#endif
#endif
#if !defined(simde_math_erfc)
#if SIMDE_MATH_BUILTIN_LIBM(erfc)
#define simde_math_erfc(v) __builtin_erfc(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_erfc(v) std::erfc(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_erfc(v) erfc(v)
#endif
#endif
#if !defined(simde_math_erfcf)
#if SIMDE_MATH_BUILTIN_LIBM(erfcf)
#define simde_math_erfcf(v) __builtin_erfcf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_erfcf(v) std::erfc(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_erfcf(v) erfcf(v)
#endif
#endif
#if !defined(simde_math_exp)
#if SIMDE_MATH_BUILTIN_LIBM(exp)
#define simde_math_exp(v) __builtin_exp(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_exp(v) std::exp(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_exp(v) exp(v)
#endif
#endif
#if !defined(simde_math_expf)
#if SIMDE_MATH_BUILTIN_LIBM(expf)
#define simde_math_expf(v) __builtin_expf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_expf(v) std::exp(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_expf(v) expf(v)
#endif
#endif
#if !defined(simde_math_expm1)
#if SIMDE_MATH_BUILTIN_LIBM(expm1)
#define simde_math_expm1(v) __builtin_expm1(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_expm1(v) std::expm1(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_expm1(v) expm1(v)
#endif
#endif
#if !defined(simde_math_expm1f)
#if SIMDE_MATH_BUILTIN_LIBM(expm1f)
#define simde_math_expm1f(v) __builtin_expm1f(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_expm1f(v) std::expm1(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_expm1f(v) expm1f(v)
#endif
#endif
#if !defined(simde_math_exp2)
#if SIMDE_MATH_BUILTIN_LIBM(exp2)
#define simde_math_exp2(v) __builtin_exp2(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_exp2(v) std::exp2(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_exp2(v) exp2(v)
#endif
#endif
#if !defined(simde_math_exp2f)
#if SIMDE_MATH_BUILTIN_LIBM(exp2f)
#define simde_math_exp2f(v) __builtin_exp2f(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_exp2f(v) std::exp2(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_exp2f(v) exp2f(v)
#endif
#endif
/* exp10/exp10f are not ISO C, so there is no SIMDE_MATH_BUILTIN_LIBM
 * dispatch; use the GCC/Clang builtin when available and fall back to
 * pow(10, v).  Wrapped in !defined(...) guards for consistency with every
 * other simde_math_* macro in this file, so a user-provided definition is
 * respected instead of being redefined. */
#if !defined(simde_math_exp10)
  #if HEDLEY_HAS_BUILTIN(__builtin_exp10) || HEDLEY_GCC_VERSION_CHECK(3,4,0)
    #define simde_math_exp10(v) __builtin_exp10(v)
  #else
    #define simde_math_exp10(v) pow(10.0, (v))
  #endif
#endif
#if !defined(simde_math_exp10f)
  #if HEDLEY_HAS_BUILTIN(__builtin_exp10f) || HEDLEY_GCC_VERSION_CHECK(3,4,0)
    #define simde_math_exp10f(v) __builtin_exp10f(v)
  #else
    #define simde_math_exp10f(v) powf(10.0f, (v))
  #endif
#endif
#if !defined(simde_math_fabs)
#if SIMDE_MATH_BUILTIN_LIBM(fabs)
#define simde_math_fabs(v) __builtin_fabs(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fabs(v) std::fabs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fabs(v) fabs(v)
#endif
#endif
/* NOTE(review): simde_math_fabsf is already defined earlier in this file
 * (next to simde_math_abs, where the CMATH branch uses std::abs rather than
 * std::fabs); because of the !defined guard this second copy is normally
 * dead code.  Harmless, but one of the two definitions could be removed. */
#if !defined(simde_math_fabsf)
#if SIMDE_MATH_BUILTIN_LIBM(fabsf)
#define simde_math_fabsf(v) __builtin_fabsf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fabsf(v) std::fabs(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fabsf(v) fabsf(v)
#endif
#endif
#if !defined(simde_math_floor)
#if SIMDE_MATH_BUILTIN_LIBM(floor)
#define simde_math_floor(v) __builtin_floor(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_floor(v) std::floor(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_floor(v) floor(v)
#endif
#endif
#if !defined(simde_math_floorf)
#if SIMDE_MATH_BUILTIN_LIBM(floorf)
#define simde_math_floorf(v) __builtin_floorf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_floorf(v) std::floor(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_floorf(v) floorf(v)
#endif
#endif
#if !defined(simde_math_fma)
#if SIMDE_MATH_BUILTIN_LIBM(fma)
#define simde_math_fma(x, y, z) __builtin_fma(x, y, z)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fma(x, y, z) std::fma(x, y, z)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fma(x, y, z) fma(x, y, z)
#endif
#endif
#if !defined(simde_math_fmaf)
#if SIMDE_MATH_BUILTIN_LIBM(fmaf)
#define simde_math_fmaf(x, y, z) __builtin_fmaf(x, y, z)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fmaf(x, y, z) std::fma(x, y, z)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fmaf(x, y, z) fmaf(x, y, z)
#endif
#endif
#if !defined(simde_math_fmax)
#if SIMDE_MATH_BUILTIN_LIBM(fmax)
#define simde_math_fmax(x, y) __builtin_fmax(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fmax(x, y) std::fmax(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fmax(x, y) fmax(x, y)
#endif
#endif
#if !defined(simde_math_fmaxf)
#if SIMDE_MATH_BUILTIN_LIBM(fmaxf)
#define simde_math_fmaxf(x, y) __builtin_fmaxf(x, y)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_fmaxf(x, y) std::fmax(x, y)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_fmaxf(x, y) fmaxf(x, y)
#endif
#endif
#if !defined(simde_math_hypot)
#if SIMDE_MATH_BUILTIN_LIBM(hypot)
#define simde_math_hypot(y, x) __builtin_hypot(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_hypot(y, x) std::hypot(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_hypot(y, x) hypot(y, x)
#endif
#endif
#if !defined(simde_math_hypotf)
#if SIMDE_MATH_BUILTIN_LIBM(hypotf)
#define simde_math_hypotf(y, x) __builtin_hypotf(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_hypotf(y, x) std::hypot(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_hypotf(y, x) hypotf(y, x)
#endif
#endif
#if !defined(simde_math_log)
#if SIMDE_MATH_BUILTIN_LIBM(log)
#define simde_math_log(v) __builtin_log(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log(v) std::log(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log(v) log(v)
#endif
#endif
#if !defined(simde_math_logf)
#if SIMDE_MATH_BUILTIN_LIBM(logf)
#define simde_math_logf(v) __builtin_logf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_logf(v) std::log(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_logf(v) logf(v)
#endif
#endif
#if !defined(simde_math_logb)
#if SIMDE_MATH_BUILTIN_LIBM(logb)
#define simde_math_logb(v) __builtin_logb(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_logb(v) std::logb(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_logb(v) logb(v)
#endif
#endif
#if !defined(simde_math_logbf)
#if SIMDE_MATH_BUILTIN_LIBM(logbf)
#define simde_math_logbf(v) __builtin_logbf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_logbf(v) std::logb(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_logbf(v) logbf(v)
#endif
#endif
#if !defined(simde_math_log1p)
#if SIMDE_MATH_BUILTIN_LIBM(log1p)
#define simde_math_log1p(v) __builtin_log1p(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log1p(v) std::log1p(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log1p(v) log1p(v)
#endif
#endif
#if !defined(simde_math_log1pf)
#if SIMDE_MATH_BUILTIN_LIBM(log1pf)
#define simde_math_log1pf(v) __builtin_log1pf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log1pf(v) std::log1p(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log1pf(v) log1pf(v)
#endif
#endif
#if !defined(simde_math_log2)
#if SIMDE_MATH_BUILTIN_LIBM(log2)
#define simde_math_log2(v) __builtin_log2(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log2(v) std::log2(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log2(v) log2(v)
#endif
#endif
#if !defined(simde_math_log2f)
#if SIMDE_MATH_BUILTIN_LIBM(log2f)
#define simde_math_log2f(v) __builtin_log2f(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log2f(v) std::log2(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log2f(v) log2f(v)
#endif
#endif
#if !defined(simde_math_log10)
#if SIMDE_MATH_BUILTIN_LIBM(log10)
#define simde_math_log10(v) __builtin_log10(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log10(v) std::log10(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log10(v) log10(v)
#endif
#endif
#if !defined(simde_math_log10f)
#if SIMDE_MATH_BUILTIN_LIBM(log10f)
#define simde_math_log10f(v) __builtin_log10f(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_log10f(v) std::log10(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_log10f(v) log10f(v)
#endif
#endif
#if !defined(simde_math_modf)
#if SIMDE_MATH_BUILTIN_LIBM(modf)
#define simde_math_modf(x, iptr) __builtin_modf(x, iptr)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_modf(x, iptr) std::modf(x, iptr)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_modf(x, iptr) modf(x, iptr)
#endif
#endif
#if !defined(simde_math_modff)
#if SIMDE_MATH_BUILTIN_LIBM(modff)
#define simde_math_modff(x, iptr) __builtin_modff(x, iptr)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_modff(x, iptr) std::modf(x, iptr)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_modff(x, iptr) modff(x, iptr)
#endif
#endif
#if !defined(simde_math_nearbyint)
#if SIMDE_MATH_BUILTIN_LIBM(nearbyint)
#define simde_math_nearbyint(v) __builtin_nearbyint(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_nearbyint(v) std::nearbyint(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_nearbyint(v) nearbyint(v)
#endif
#endif
#if !defined(simde_math_nearbyintf)
#if SIMDE_MATH_BUILTIN_LIBM(nearbyintf)
#define simde_math_nearbyintf(v) __builtin_nearbyintf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_nearbyintf(v) std::nearbyint(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_nearbyintf(v) nearbyintf(v)
#endif
#endif
#if !defined(simde_math_pow)
#if SIMDE_MATH_BUILTIN_LIBM(pow)
#define simde_math_pow(y, x) __builtin_pow(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_pow(y, x) std::pow(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_pow(y, x) pow(y, x)
#endif
#endif
#if !defined(simde_math_powf)
#if SIMDE_MATH_BUILTIN_LIBM(powf)
#define simde_math_powf(y, x) __builtin_powf(y, x)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_powf(y, x) std::pow(y, x)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_powf(y, x) powf(y, x)
#endif
#endif
#if !defined(simde_math_rint)
#if SIMDE_MATH_BUILTIN_LIBM(rint)
#define simde_math_rint(v) __builtin_rint(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_rint(v) std::rint(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_rint(v) rint(v)
#endif
#endif
#if !defined(simde_math_rintf)
#if SIMDE_MATH_BUILTIN_LIBM(rintf)
#define simde_math_rintf(v) __builtin_rintf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_rintf(v) std::rint(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_rintf(v) rintf(v)
#endif
#endif
#if !defined(simde_math_round)
#if SIMDE_MATH_BUILTIN_LIBM(round)
#define simde_math_round(v) __builtin_round(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_round(v) std::round(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_round(v) round(v)
#endif
#endif
#if !defined(simde_math_roundf)
#if SIMDE_MATH_BUILTIN_LIBM(roundf)
#define simde_math_roundf(v) __builtin_roundf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_roundf(v) std::round(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_roundf(v) roundf(v)
#endif
#endif
#if !defined(simde_math_roundeven)
#if \
HEDLEY_HAS_BUILTIN(__builtin_roundeven) || \
HEDLEY_GCC_VERSION_CHECK(10,0,0)
#define simde_math_roundeven(v) __builtin_roundeven(v)
#elif defined(simde_math_round) && defined(simde_math_fabs)
static HEDLEY_INLINE
double
simde_math_roundeven(double v) {
  /* Portable round-half-to-even: round half-away-from-zero first, then pull
   * exact-half results that landed on an odd integer back to the even
   * neighbor on the other side of v. */
  double nearest = simde_math_round(v);
  double delta = nearest - v;
  const int is_exact_half = (simde_math_fabs(delta) == 0.5);
  if (HEDLEY_UNLIKELY(is_exact_half) && (HEDLEY_STATIC_CAST(int64_t, nearest) & 1)) {
    nearest = v - delta;
  }
  return nearest;
}
#define simde_math_roundeven simde_math_roundeven
#endif
#endif
#if !defined(simde_math_roundevenf)
#if \
HEDLEY_HAS_BUILTIN(__builtin_roundevenf) || \
HEDLEY_GCC_VERSION_CHECK(10,0,0)
#define simde_math_roundevenf(v) __builtin_roundevenf(v)
#elif defined(simde_math_roundf) && defined(simde_math_fabsf)
static HEDLEY_INLINE
float
simde_math_roundevenf(float v) {
  /* Portable round-half-to-even (single precision): round half-away-from-zero
   * first, then pull exact-half results that landed on an odd integer back to
   * the even neighbor on the other side of v. */
  float nearest = simde_math_roundf(v);
  float delta = nearest - v;
  const int is_exact_half = (simde_math_fabsf(delta) == 0.5f);
  if (HEDLEY_UNLIKELY(is_exact_half) && (HEDLEY_STATIC_CAST(int32_t, nearest) & 1)) {
    nearest = v - delta;
  }
  return nearest;
}
#define simde_math_roundevenf simde_math_roundevenf
#endif
#endif
#if !defined(simde_math_sin)
#if SIMDE_MATH_BUILTIN_LIBM(sin)
#define simde_math_sin(v) __builtin_sin(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sin(v) std::sin(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sin(v) sin(v)
#endif
#endif
#if !defined(simde_math_sinf)
#if SIMDE_MATH_BUILTIN_LIBM(sinf)
#define simde_math_sinf(v) __builtin_sinf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sinf(v) std::sin(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sinf(v) sinf(v)
#endif
#endif
#if !defined(simde_math_sinh)
#if SIMDE_MATH_BUILTIN_LIBM(sinh)
#define simde_math_sinh(v) __builtin_sinh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sinh(v) std::sinh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sinh(v) sinh(v)
#endif
#endif
#if !defined(simde_math_sinhf)
#if SIMDE_MATH_BUILTIN_LIBM(sinhf)
#define simde_math_sinhf(v) __builtin_sinhf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sinhf(v) std::sinh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sinhf(v) sinhf(v)
#endif
#endif
#if !defined(simde_math_sqrt)
#if SIMDE_MATH_BUILTIN_LIBM(sqrt)
#define simde_math_sqrt(v) __builtin_sqrt(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sqrt(v) std::sqrt(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sqrt(v) sqrt(v)
#endif
#endif
#if !defined(simde_math_sqrtf)
#if SIMDE_MATH_BUILTIN_LIBM(sqrtf)
#define simde_math_sqrtf(v) __builtin_sqrtf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_sqrtf(v) std::sqrt(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_sqrtf(v) sqrtf(v)
#endif
#endif
#if !defined(simde_math_tan)
#if SIMDE_MATH_BUILTIN_LIBM(tan)
#define simde_math_tan(v) __builtin_tan(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_tan(v) std::tan(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_tan(v) tan(v)
#endif
#endif
#if !defined(simde_math_tanf)
#if SIMDE_MATH_BUILTIN_LIBM(tanf)
#define simde_math_tanf(v) __builtin_tanf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_tanf(v) std::tan(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_tanf(v) tanf(v)
#endif
#endif
#if !defined(simde_math_tanh)
#if SIMDE_MATH_BUILTIN_LIBM(tanh)
#define simde_math_tanh(v) __builtin_tanh(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_tanh(v) std::tanh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_tanh(v) tanh(v)
#endif
#endif
#if !defined(simde_math_tanhf)
#if SIMDE_MATH_BUILTIN_LIBM(tanhf)
#define simde_math_tanhf(v) __builtin_tanhf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_tanhf(v) std::tanh(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_tanhf(v) tanhf(v)
#endif
#endif
#if !defined(simde_math_trunc)
#if SIMDE_MATH_BUILTIN_LIBM(trunc)
#define simde_math_trunc(v) __builtin_trunc(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_trunc(v) std::trunc(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_trunc(v) trunc(v)
#endif
#endif
#if !defined(simde_math_truncf)
#if SIMDE_MATH_BUILTIN_LIBM(truncf)
#define simde_math_truncf(v) __builtin_truncf(v)
#elif defined(SIMDE_MATH_HAVE_CMATH)
#define simde_math_truncf(v) std::trunc(v)
#elif defined(SIMDE_MATH_HAVE_MATH_H)
#define simde_math_truncf(v) truncf(v)
#endif
#endif
/*** Comparison macros (which don't raise invalid errors) ***/
/* isunordered(x, y): nonzero iff at least one argument is NaN.  Prefer the
 * platform macro / builtin; otherwise fall back to portable self-comparison
 * (NaN is the only value that compares unequal to itself). */
#if defined(isunordered)
#define simde_math_isunordered(x, y) isunordered(x, y)
#elif HEDLEY_HAS_BUILTIN(__builtin_isunordered)
#define simde_math_isunordered(x, y) __builtin_isunordered(x, y)
#else
static HEDLEY_INLINE
int simde_math_isunordered(double x, double y) {
  /* Both ordered (neither NaN): definitely not unordered. */
  if (x == x && y == y) {
    return 0;
  }
  /* At least one NaN: any != comparison involving NaN is true. */
  return x != y;
}
#define simde_math_isunordered simde_math_isunordered
static HEDLEY_INLINE
int simde_math_isunorderedf(float x, float y) {
  if (x == x && y == y) {
    return 0;
  }
  return x != y;
}
#define simde_math_isunorderedf simde_math_isunorderedf
#endif
#if !defined(simde_math_isunorderedf)
#define simde_math_isunorderedf simde_math_isunordered
#endif
/*** Additional functions not in libm ***/
#if defined(simde_math_fabs) && defined(simde_math_sqrt) && defined(simde_math_exp)
/* Standard normal CDF Phi(x) = 0.5 * (1 + erf(x / sqrt(2))), computed via the
 * Abramowitz & Stegun 7.1.26 polynomial approximation of erf. */
static HEDLEY_INLINE
double
simde_math_cdfnorm(double x) {
/* https://www.johndcook.com/blog/cpp_phi/
* Public Domain */
static const double a1 = 0.254829592;
static const double a2 = -0.284496736;
static const double a3 = 1.421413741;
static const double a4 = -1.453152027;
static const double a5 = 1.061405429;
static const double p = 0.3275911;
/* Work on |x| and reapply the sign at the end (Phi(-x) = 1 - Phi(x)). */
const int sign = x < 0;
x = simde_math_fabs(x) / simde_math_sqrt(2.0);
/* A&S formula 7.1.26 */
double t = 1.0 / (1.0 + p * x);
double y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * simde_math_exp(-x * x);
return 0.5 * (1.0 + (sign ? -y : y));
}
#define simde_math_cdfnorm simde_math_cdfnorm
#endif
#if defined(simde_math_fabsf) && defined(simde_math_sqrtf) && defined(simde_math_expf)
/* Single-precision standard normal CDF; see simde_math_cdfnorm above
 * (Abramowitz & Stegun 7.1.26 erf approximation). */
static HEDLEY_INLINE
float
simde_math_cdfnormf(float x) {
/* https://www.johndcook.com/blog/cpp_phi/
* Public Domain */
static const float a1 = 0.254829592f;
static const float a2 = -0.284496736f;
static const float a3 = 1.421413741f;
static const float a4 = -1.453152027f;
static const float a5 = 1.061405429f;
static const float p = 0.3275911f;
/* Work on |x| and reapply the sign at the end (Phi(-x) = 1 - Phi(x)). */
const int sign = x < 0;
x = simde_math_fabsf(x) / simde_math_sqrtf(2.0f);
/* A&S formula 7.1.26 */
float t = 1.0f / (1.0f + p * x);
float y = 1.0f - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * simde_math_expf(-x * x);
return 0.5f * (1.0f + (sign ? -y : y));
}
#define simde_math_cdfnormf simde_math_cdfnormf
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#if !defined(simde_math_cdfnorminv) && defined(simde_math_log) && defined(simde_math_sqrt)
/*https://web.archive.org/web/20150910081113/http://home.online.no/~pjacklam/notes/invnorm/impl/sprouse/ltqnorm.c*/
/* Inverse of the standard normal CDF (probit).  Rational-polynomial
 * approximation evaluated in three regions: lower tail (p < 0.02425),
 * central region, and upper tail (p > 0.97575) — presumably Acklam's
 * ltqnorm algorithm per the URL above.  Note: out-of-domain p (< 0 or > 1)
 * returns 0.0 rather than NaN; p == 0 / p == 1 return -/+ infinity. */
static HEDLEY_INLINE
double
simde_math_cdfnorminv(double p) {
static const double a[] = {
-3.969683028665376e+01,
2.209460984245205e+02,
-2.759285104469687e+02,
1.383577518672690e+02,
-3.066479806614716e+01,
2.506628277459239e+00
};
static const double b[] = {
-5.447609879822406e+01,
1.615858368580409e+02,
-1.556989798598866e+02,
6.680131188771972e+01,
-1.328068155288572e+01
};
static const double c[] = {
-7.784894002430293e-03,
-3.223964580411365e-01,
-2.400758277161838e+00,
-2.549732539343734e+00,
4.374664141464968e+00,
2.938163982698783e+00
};
static const double d[] = {
7.784695709041462e-03,
3.224671290700398e-01,
2.445134137142996e+00,
3.754408661907416e+00
};
static const double low = 0.02425;
static const double high = 0.97575;
double q, r;
if (p < 0 || p > 1) {
return 0.0;
} else if (p == 0) {
return -SIMDE_MATH_INFINITY;
} else if (p == 1) {
return SIMDE_MATH_INFINITY;
} else if (p < low) {
/* Lower tail: substitute q = sqrt(-2 log(p)). */
q = simde_math_sqrt(-2.0 * simde_math_log(p));
return
(((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) /
(((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1));
} else if (p > high) {
/* Upper tail: mirror of the lower tail with q = sqrt(-2 log(1 - p)). */
q = simde_math_sqrt(-2.0 * simde_math_log(1.0 - p));
return
-(((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) /
(((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1));
} else {
/* Central region: rational approximation in r = (p - 0.5)^2. */
q = p - 0.5;
r = q * q;
return (((((a[0] * r + a[1]) * r + a[2]) * r + a[3]) * r + a[4]) * r + a[5]) *
q / (((((b[0] * r + b[1]) * r + b[2]) * r + b[3]) * r + b[4]) * r + 1);
}
}
#define simde_math_cdfnorminv simde_math_cdfnorminv
#endif
#if !defined(simde_math_cdfnorminvf) && defined(simde_math_logf) && defined(simde_math_sqrtf)
/* Single-precision inverse standard normal CDF; same three-region rational
 * approximation (and same out-of-domain conventions) as simde_math_cdfnorminv
 * above. */
static HEDLEY_INLINE
float
simde_math_cdfnorminvf(float p) {
static const float a[] = {
-3.969683028665376e+01f,
2.209460984245205e+02f,
-2.759285104469687e+02f,
1.383577518672690e+02f,
-3.066479806614716e+01f,
2.506628277459239e+00f
};
static const float b[] = {
-5.447609879822406e+01f,
1.615858368580409e+02f,
-1.556989798598866e+02f,
6.680131188771972e+01f,
-1.328068155288572e+01f
};
static const float c[] = {
-7.784894002430293e-03f,
-3.223964580411365e-01f,
-2.400758277161838e+00f,
-2.549732539343734e+00f,
4.374664141464968e+00f,
2.938163982698783e+00f
};
static const float d[] = {
7.784695709041462e-03f,
3.224671290700398e-01f,
2.445134137142996e+00f,
3.754408661907416e+00f
};
static const float low = 0.02425f;
static const float high = 0.97575f;
float q, r;
if (p < 0 || p > 1) {
return 0.0f;
} else if (p == 0) {
return -SIMDE_MATH_INFINITYF;
} else if (p == 1) {
return SIMDE_MATH_INFINITYF;
} else if (p < low) {
/* Lower tail: substitute q = sqrt(-2 log(p)). */
q = simde_math_sqrtf(-2.0f * simde_math_logf(p));
return
(((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) /
(((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1));
} else if (p > high) {
/* Upper tail: mirror of the lower tail with q = sqrt(-2 log(1 - p)). */
q = simde_math_sqrtf(-2.0f * simde_math_logf(1.0f - p));
return
-(((((c[0] * q + c[1]) * q + c[2]) * q + c[3]) * q + c[4]) * q + c[5]) /
(((((d[0] * q + d[1]) * q + d[2]) * q + d[3]) * q + 1));
} else {
/* Central region: rational approximation in r = (p - 0.5)^2. */
q = p - 0.5f;
r = q * q;
return (((((a[0] * r + a[1]) * r + a[2]) * r + a[3]) * r + a[4]) * r + a[5]) *
q / (((((b[0] * r + b[1]) * r + b[2]) * r + b[3]) * r + b[4]) * r + 1);
}
}
#define simde_math_cdfnorminvf simde_math_cdfnorminvf
#endif
#if !defined(simde_math_erfinv) && defined(simde_math_log) && defined(simde_math_copysign) && defined(simde_math_sqrt)
/* Inverse error function on (-1, 1), using a closed-form log-based
 * approximation (Winitzki-style — see the SO link below); not exact, see the
 * error figures in the comment. */
static HEDLEY_INLINE
double
simde_math_erfinv(double x) {
/* https://stackoverflow.com/questions/27229371/inverse-error-function-in-c
*
* The original answer on SO uses a constant of 0.147, but in my
* testing 0.14829094707965850830078125 gives a lower average absolute error
* (0.0001410958211636170744895935 vs. 0.0001465479290345683693885803).
* That said, if your goal is to minimize the *maximum* absolute
* error, 0.15449436008930206298828125 provides significantly better
* results; 0.0009250640869140625000000000 vs ~ 0.005. */
double tt1, tt2, lnx;
/* Odd function: compute on |x| implicitly via log(1 - x^2), restore sign. */
double sgn = simde_math_copysign(1.0, x);
x = (1.0 - x) * (1.0 + x);
lnx = simde_math_log(x);
tt1 = 2.0 / (SIMDE_MATH_PI * 0.14829094707965850830078125) + 0.5 * lnx;
tt2 = (1.0 / 0.14829094707965850830078125) * lnx;
return sgn * simde_math_sqrt(-tt1 + simde_math_sqrt(tt1 * tt1 - tt2));
}
#define simde_math_erfinv simde_math_erfinv
#endif
#if !defined(simde_math_erfinvf) && defined(simde_math_logf) && defined(simde_math_copysignf) && defined(simde_math_sqrtf)
/* Single-precision inverse error function; same approximation (and the same
 * tuned constant) as simde_math_erfinv above. */
static HEDLEY_INLINE
float
simde_math_erfinvf(float x) {
float tt1, tt2, lnx;
/* Odd function: compute on |x| implicitly via log(1 - x^2), restore sign. */
float sgn = simde_math_copysignf(1.0f, x);
x = (1.0f - x) * (1.0f + x);
lnx = simde_math_logf(x);
tt1 = 2.0f / (SIMDE_MATH_PIF * 0.14829094707965850830078125f) + 0.5f * lnx;
tt2 = (1.0f / 0.14829094707965850830078125f) * lnx;
return sgn * simde_math_sqrtf(-tt1 + simde_math_sqrtf(tt1 * tt1 - tt2));
}
#define simde_math_erfinvf simde_math_erfinvf
#endif
#if !defined(simde_math_erfcinv) && defined(simde_math_erfinv) && defined(simde_math_log) && defined(simde_math_sqrt)
/* Inverse complementary error function.  Useful domain is (0, 2): the central
 * region delegates to simde_math_erfinv, while very small x uses rational
 * approximations in t = 1 / sqrt(-log(x)).  Other inputs map to +/-infinity
 * (see the trailing branches). */
static HEDLEY_INLINE
double
simde_math_erfcinv(double x) {
  if(x >= 0.0625 && x < 2.0) {
    return simde_math_erfinv(1.0 - x);
  } else if (x < 0.0625 && x >= 1.0e-100) {
    /* static const: the coefficient tables are read-only, so avoid
     * re-initializing them on every call (also matches the float variant
     * simde_math_erfcinvf, which already declares them static const). */
    static const double p[6] = {
      0.1550470003116,
      1.382719649631,
      0.690969348887,
      -1.128081391617,
      0.680544246825,
      -0.16444156791
    };
    static const double q[3] = {
      0.155024849822,
      1.385228141995,
      1.000000000000
    };
    const double t = 1.0 / simde_math_sqrt(-simde_math_log(x));
    return (p[0] / t + p[1] + t * (p[2] + t * (p[3] + t * (p[4] + t * p[5])))) /
           (q[0] + t * (q[1] + t * (q[2])));
  } else if (x < 1.0e-100 && x >= SIMDE_MATH_DBL_MIN) {
    /* Extreme tail (x below 1e-100): lower-order rational approximation. */
    static const double p[4] = {
      0.00980456202915,
      0.363667889171,
      0.97302949837,
      -0.5374947401
    };
    static const double q[3] = {
      0.00980451277802,
      0.363699971544,
      1.000000000000
    };
    const double t = 1.0 / simde_math_sqrt(-simde_math_log(x));
    return (p[0] / t + p[1] + t * (p[2] + t * p[3])) /
           (q[0] + t * (q[1] + t * (q[2])));
  } else if (!simde_math_isnormal(x)) {
    /* Zero / denormal input: erfcinv(0) = +infinity. */
    return SIMDE_MATH_INFINITY;
  } else {
    /* Remaining normal inputs (x >= 2, negative x): -infinity. */
    return -SIMDE_MATH_INFINITY;
  }
}
#define simde_math_erfcinv simde_math_erfcinv
#endif
#if !defined(simde_math_erfcinvf) && defined(simde_math_erfinvf) && defined(simde_math_logf) && defined(simde_math_sqrtf)
/* Single-precision inverse complementary error function; mirrors
 * simde_math_erfcinv above. */
static HEDLEY_INLINE
float
simde_math_erfcinvf(float x) {
  if(x >= 0.0625f && x < 2.0f) {
    return simde_math_erfinvf(1.0f - x);
  } else if (x < 0.0625f && x >= SIMDE_MATH_FLT_MIN) {
    /* BUG FIX: a comma was missing between the last two coefficients, so the
     * p[6] initializer contained only five elements — p[4] silently became
     * (0.680544246825f - 0.164441567910f) and p[5] was zero, corrupting the
     * polynomial.  The double-precision table in simde_math_erfcinv has the
     * comma and confirms the intended values. */
    static const float p[6] = {
      0.1550470003116f,
      1.382719649631f,
      0.690969348887f,
      -1.128081391617f,
      0.680544246825f,
      -0.164441567910f
    };
    static const float q[3] = {
      0.155024849822f,
      1.385228141995f,
      1.000000000000f
    };
    const float t = 1.0f / simde_math_sqrtf(-simde_math_logf(x));
    return (p[0] / t + p[1] + t * (p[2] + t * (p[3] + t * (p[4] + t * p[5])))) /
           (q[0] + t * (q[1] + t * (q[2])));
  } else if (x < SIMDE_MATH_FLT_MIN && simde_math_isnormalf(x)) {
    /* Extreme tail: lower-order rational approximation. */
    static const float p[4] = {
      0.00980456202915f,
      0.36366788917100f,
      0.97302949837000f,
      -0.5374947401000f
    };
    static const float q[3] = {
      0.00980451277802f,
      0.36369997154400f,
      1.00000000000000f
    };
    const float t = 1.0f / simde_math_sqrtf(-simde_math_logf(x));
    return (p[0] / t + p[1] + t * (p[2] + t * p[3])) /
           (q[0] + t * (q[1] + t * (q[2])));
  } else {
    /* Normal out-of-domain input (x >= 2, negative x): -infinity; zero /
     * denormal input: +infinity. */
    return simde_math_isnormalf(x) ? -SIMDE_MATH_INFINITYF : SIMDE_MATH_INFINITYF;
  }
}
#define simde_math_erfcinvf simde_math_erfcinvf
#endif
HEDLEY_DIAGNOSTIC_POP
static HEDLEY_INLINE
double
simde_math_rad2deg(double radians) {
  /* Convert radians to degrees: degrees = (180 / pi) * radians. */
  const double scale = SIMDE_MATH_180_OVER_PI;
  return scale * radians;
}
static HEDLEY_INLINE
float
simde_math_rad2degf(float radians) {
  /* Convert radians to degrees (single precision). */
  const float scale = SIMDE_MATH_180_OVER_PIF;
  return scale * radians;
}
/* Convert an angle from degrees to radians (multiply by pi/180). */
static HEDLEY_INLINE
double
simde_math_deg2rad(double degrees) {
  const double scale = SIMDE_MATH_PI_OVER_180;
  return degrees * scale;
}
/* Single-precision degrees-to-radians conversion (multiply by pi/180). */
static HEDLEY_INLINE
float
simde_math_deg2radf(float degrees) {
  const float scale = SIMDE_MATH_PI_OVER_180F;
  return degrees * scale;
}
/*** Saturated arithmetic ***/
/* Saturating signed 8-bit addition: clamps to [INT8_MIN, INT8_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int8_t
simde_math_adds_i8(int8_t a, int8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddb_s8(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint8_t au = HEDLEY_STATIC_CAST(uint8_t, a);
    uint8_t bu = HEDLEY_STATIC_CAST(uint8_t, b);
    uint8_t sum = au + bu;
    /* Saturation value: INT8_MAX when a >= 0, INT8_MIN (0x80) when a < 0. */
    au = (au >> ((8 * sizeof(sum)) - 1)) + INT8_MAX;
    /* Overflow occurred iff a and b share a sign that the wrapped sum lacks. */
    if (HEDLEY_STATIC_CAST(int8_t, ((au ^ bu) | ~(bu ^ sum))) >= 0) {
      sum = au;
    }
    return HEDLEY_STATIC_CAST(int8_t, sum);
  #endif
}
/* Saturating signed 16-bit addition: clamps to [INT16_MIN, INT16_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int16_t
simde_math_adds_i16(int16_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddh_s16(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint16_t au = HEDLEY_STATIC_CAST(uint16_t, a);
    uint16_t bu = HEDLEY_STATIC_CAST(uint16_t, b);
    uint16_t sum = au + bu;
    /* Saturation value: INT16_MAX when a >= 0, INT16_MIN when a < 0. */
    au = (au >> ((8 * sizeof(sum)) - 1)) + INT16_MAX;
    /* Overflow occurred iff a and b share a sign that the wrapped sum lacks. */
    if (HEDLEY_STATIC_CAST(int16_t, ((au ^ bu) | ~(bu ^ sum))) >= 0) {
      sum = au;
    }
    return HEDLEY_STATIC_CAST(int16_t, sum);
  #endif
}
/* Saturating signed 32-bit addition: clamps to [INT32_MIN, INT32_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int32_t
simde_math_adds_i32(int32_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqadds_s32(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint32_t au = HEDLEY_STATIC_CAST(uint32_t, a);
    uint32_t bu = HEDLEY_STATIC_CAST(uint32_t, b);
    uint32_t sum = au + bu;
    /* Saturation value: INT32_MAX when a >= 0, INT32_MIN when a < 0. */
    au = (au >> ((8 * sizeof(sum)) - 1)) + INT32_MAX;
    /* Overflow occurred iff a and b share a sign that the wrapped sum lacks. */
    if (HEDLEY_STATIC_CAST(int32_t, ((au ^ bu) | ~(bu ^ sum))) >= 0) {
      sum = au;
    }
    return HEDLEY_STATIC_CAST(int32_t, sum);
  #endif
}
/* Saturating signed 64-bit addition: clamps to [INT64_MIN, INT64_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int64_t
simde_math_adds_i64(int64_t a, int64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddd_s64(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint64_t au = HEDLEY_STATIC_CAST(uint64_t, a);
    uint64_t bu = HEDLEY_STATIC_CAST(uint64_t, b);
    uint64_t sum = au + bu;
    /* Saturation value: INT64_MAX when a >= 0, INT64_MIN when a < 0. */
    au = (au >> ((8 * sizeof(sum)) - 1)) + INT64_MAX;
    /* Overflow occurred iff a and b share a sign that the wrapped sum lacks. */
    if (HEDLEY_STATIC_CAST(int64_t, ((au ^ bu) | ~(bu ^ sum))) >= 0) {
      sum = au;
    }
    return HEDLEY_STATIC_CAST(int64_t, sum);
  #endif
}
/* Saturating unsigned 8-bit addition: result sticks at UINT8_MAX on overflow. */
static HEDLEY_INLINE
uint8_t
simde_math_adds_u8(uint8_t a, uint8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddb_u8(a, b);
  #else
    uint8_t sum = a + b;
    /* If the sum wrapped (sum < a), OR in an all-ones mask. */
    sum |= -(sum < a);
    return sum;
  #endif
}
/* Saturating unsigned 16-bit addition: result sticks at UINT16_MAX on overflow. */
static HEDLEY_INLINE
uint16_t
simde_math_adds_u16(uint16_t a, uint16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddh_u16(a, b);
  #else
    uint16_t sum = a + b;
    /* If the sum wrapped (sum < a), OR in an all-ones mask. */
    sum |= -(sum < a);
    return sum;
  #endif
}
/* Saturating unsigned 32-bit addition: result sticks at UINT32_MAX on overflow. */
static HEDLEY_INLINE
uint32_t
simde_math_adds_u32(uint32_t a, uint32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqadds_u32(a, b);
  #else
    uint32_t sum = a + b;
    /* If the sum wrapped (sum < a), OR in an all-ones mask. */
    sum |= -(sum < a);
    return sum;
  #endif
}
/* Saturating unsigned 64-bit addition: result sticks at UINT64_MAX on overflow. */
static HEDLEY_INLINE
uint64_t
simde_math_adds_u64(uint64_t a, uint64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqaddd_u64(a, b);
  #else
    uint64_t sum = a + b;
    /* If the sum wrapped (sum < a), OR in an all-ones mask. */
    sum |= -(sum < a);
    return sum;
  #endif
}
/* Saturating signed 8-bit subtraction: clamps to [INT8_MIN, INT8_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int8_t
simde_math_subs_i8(int8_t a, int8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubb_s8(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint8_t au = HEDLEY_STATIC_CAST(uint8_t, a);
    uint8_t bu = HEDLEY_STATIC_CAST(uint8_t, b);
    uint8_t diff = au - bu;
    /* Saturation value: INT8_MAX when a >= 0, INT8_MIN (0x80) when a < 0. */
    au = (au >> 7) + INT8_MAX;
    /* Overflow occurred iff a and b differ in sign and the result's sign
     * differs from a's. */
    if (HEDLEY_STATIC_CAST(int8_t, (au ^ bu) & (au ^ diff)) < 0) {
      diff = au;
    }
    return HEDLEY_STATIC_CAST(int8_t, diff);
  #endif
}
/* Saturating signed 16-bit subtraction: clamps to [INT16_MIN, INT16_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int16_t
simde_math_subs_i16(int16_t a, int16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubh_s16(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint16_t au = HEDLEY_STATIC_CAST(uint16_t, a);
    uint16_t bu = HEDLEY_STATIC_CAST(uint16_t, b);
    uint16_t diff = au - bu;
    /* Saturation value: INT16_MAX when a >= 0, INT16_MIN when a < 0. */
    au = (au >> 15) + INT16_MAX;
    /* Overflow occurred iff a and b differ in sign and the result's sign
     * differs from a's. */
    if (HEDLEY_STATIC_CAST(int16_t, (au ^ bu) & (au ^ diff)) < 0) {
      diff = au;
    }
    return HEDLEY_STATIC_CAST(int16_t, diff);
  #endif
}
/* Saturating signed 32-bit subtraction: clamps to [INT32_MIN, INT32_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int32_t
simde_math_subs_i32(int32_t a, int32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubs_s32(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint32_t au = HEDLEY_STATIC_CAST(uint32_t, a);
    uint32_t bu = HEDLEY_STATIC_CAST(uint32_t, b);
    uint32_t diff = au - bu;
    /* Saturation value: INT32_MAX when a >= 0, INT32_MIN when a < 0. */
    au = (au >> 31) + INT32_MAX;
    /* Overflow occurred iff a and b differ in sign and the result's sign
     * differs from a's. */
    if (HEDLEY_STATIC_CAST(int32_t, (au ^ bu) & (au ^ diff)) < 0) {
      diff = au;
    }
    return HEDLEY_STATIC_CAST(int32_t, diff);
  #endif
}
/* Saturating signed 64-bit subtraction: clamps to [INT64_MIN, INT64_MAX]
 * instead of wrapping on overflow. */
static HEDLEY_INLINE
int64_t
simde_math_subs_i64(int64_t a, int64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubd_s64(a, b);
  #else
    /* Do the arithmetic in unsigned space, where wraparound is defined. */
    uint64_t au = HEDLEY_STATIC_CAST(uint64_t, a);
    uint64_t bu = HEDLEY_STATIC_CAST(uint64_t, b);
    uint64_t diff = au - bu;
    /* Saturation value: INT64_MAX when a >= 0, INT64_MIN when a < 0. */
    au = (au >> 63) + INT64_MAX;
    /* Overflow occurred iff a and b differ in sign and the result's sign
     * differs from a's. */
    if (HEDLEY_STATIC_CAST(int64_t, (au ^ bu) & (au ^ diff)) < 0) {
      diff = au;
    }
    return HEDLEY_STATIC_CAST(int64_t, diff);
  #endif
}
/* Saturating unsigned 8-bit subtraction: result sticks at 0 on underflow. */
static HEDLEY_INLINE
uint8_t
simde_math_subs_u8(uint8_t a, uint8_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubb_u8(a, b);
  #else
    uint8_t diff = a - b;
    /* If the subtraction wrapped (diff > a), AND with a zero mask. */
    diff &= -(diff <= a);
    return diff;
  #endif
}
/* Saturating unsigned 16-bit subtraction: result sticks at 0 on underflow. */
static HEDLEY_INLINE
uint16_t
simde_math_subs_u16(uint16_t a, uint16_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubh_u16(a, b);
  #else
    uint16_t diff = a - b;
    /* If the subtraction wrapped (diff > a), AND with a zero mask. */
    diff &= -(diff <= a);
    return diff;
  #endif
}
/* Saturating unsigned 32-bit subtraction: result sticks at 0 on underflow. */
static HEDLEY_INLINE
uint32_t
simde_math_subs_u32(uint32_t a, uint32_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubs_u32(a, b);
  #else
    uint32_t diff = a - b;
    /* If the subtraction wrapped (diff > a), AND with a zero mask. */
    diff &= -(diff <= a);
    return diff;
  #endif
}
/* Saturating unsigned 64-bit subtraction: result sticks at 0 on underflow. */
static HEDLEY_INLINE
uint64_t
simde_math_subs_u64(uint64_t a, uint64_t b) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vqsubd_u64(a, b);
  #else
    uint64_t diff = a - b;
    /* If the subtraction wrapped (diff > a), AND with a zero mask. */
    diff &= -(diff <= a);
    return diff;
  #endif
}
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_MATH_H) */
/* :: End simde-math.h :: */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-constify.h :: */
/* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2020 Evan Nemerson <evan@nemerson.com>
*/
/* Constify macros. For internal use only.
*
* These are used to make it possible to call a function which takes
* an Integer Constant Expression (ICE) using a compile time constant.
* Technically it would also be possible to use a value not trivially
* known by the compiler, but there would be a significant performance
* hit (a switch statement is used).
*
* The basic idea is pretty simple; we just emit a do while loop which
* contains a switch with a case for every possible value of the
* constant.
*
* As long as the value you pass to the function is constant, pretty
* much any compiler shouldn't have a problem generating exactly the
* same code as if you had used an ICE.
*
* This is intended to be used in the SIMDe implementations of
* functions the compilers require to be an ICE, but the other benefit
* is that if we also disable the warnings from
* SIMDE_REQUIRE_CONSTANT_RANGE we can actually just allow the tests
* to use non-ICE parameters
*/
#if !defined(SIMDE_CONSTIFY_H)
#define SIMDE_CONSTIFY_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_
SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
/* SIMDE_CONSTIFY_N_(func_name, result, default_case, imm, ...)
 *
 * Calls func_name(__VA_ARGS__, imm) with the trailing argument spelled as
 * a literal (so it is an Integer Constant Expression) for each imm in
 * [0, N), assigning the return value to `result`.  Out-of-range values
 * of imm assign `default_case` instead. */
#define SIMDE_CONSTIFY_2_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
default: result = default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_, for imm in [0, 4). */
#define SIMDE_CONSTIFY_4_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
case 2: result = func_name(__VA_ARGS__, 2); break; \
case 3: result = func_name(__VA_ARGS__, 3); break; \
default: result = default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_, for imm in [0, 8). */
#define SIMDE_CONSTIFY_8_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
case 2: result = func_name(__VA_ARGS__, 2); break; \
case 3: result = func_name(__VA_ARGS__, 3); break; \
case 4: result = func_name(__VA_ARGS__, 4); break; \
case 5: result = func_name(__VA_ARGS__, 5); break; \
case 6: result = func_name(__VA_ARGS__, 6); break; \
case 7: result = func_name(__VA_ARGS__, 7); break; \
default: result = default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_, for imm in [0, 16). */
#define SIMDE_CONSTIFY_16_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
case 2: result = func_name(__VA_ARGS__, 2); break; \
case 3: result = func_name(__VA_ARGS__, 3); break; \
case 4: result = func_name(__VA_ARGS__, 4); break; \
case 5: result = func_name(__VA_ARGS__, 5); break; \
case 6: result = func_name(__VA_ARGS__, 6); break; \
case 7: result = func_name(__VA_ARGS__, 7); break; \
case 8: result = func_name(__VA_ARGS__, 8); break; \
case 9: result = func_name(__VA_ARGS__, 9); break; \
case 10: result = func_name(__VA_ARGS__, 10); break; \
case 11: result = func_name(__VA_ARGS__, 11); break; \
case 12: result = func_name(__VA_ARGS__, 12); break; \
case 13: result = func_name(__VA_ARGS__, 13); break; \
case 14: result = func_name(__VA_ARGS__, 14); break; \
case 15: result = func_name(__VA_ARGS__, 15); break; \
default: result = default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_, for imm in [0, 32). */
#define SIMDE_CONSTIFY_32_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
case 2: result = func_name(__VA_ARGS__, 2); break; \
case 3: result = func_name(__VA_ARGS__, 3); break; \
case 4: result = func_name(__VA_ARGS__, 4); break; \
case 5: result = func_name(__VA_ARGS__, 5); break; \
case 6: result = func_name(__VA_ARGS__, 6); break; \
case 7: result = func_name(__VA_ARGS__, 7); break; \
case 8: result = func_name(__VA_ARGS__, 8); break; \
case 9: result = func_name(__VA_ARGS__, 9); break; \
case 10: result = func_name(__VA_ARGS__, 10); break; \
case 11: result = func_name(__VA_ARGS__, 11); break; \
case 12: result = func_name(__VA_ARGS__, 12); break; \
case 13: result = func_name(__VA_ARGS__, 13); break; \
case 14: result = func_name(__VA_ARGS__, 14); break; \
case 15: result = func_name(__VA_ARGS__, 15); break; \
case 16: result = func_name(__VA_ARGS__, 16); break; \
case 17: result = func_name(__VA_ARGS__, 17); break; \
case 18: result = func_name(__VA_ARGS__, 18); break; \
case 19: result = func_name(__VA_ARGS__, 19); break; \
case 20: result = func_name(__VA_ARGS__, 20); break; \
case 21: result = func_name(__VA_ARGS__, 21); break; \
case 22: result = func_name(__VA_ARGS__, 22); break; \
case 23: result = func_name(__VA_ARGS__, 23); break; \
case 24: result = func_name(__VA_ARGS__, 24); break; \
case 25: result = func_name(__VA_ARGS__, 25); break; \
case 26: result = func_name(__VA_ARGS__, 26); break; \
case 27: result = func_name(__VA_ARGS__, 27); break; \
case 28: result = func_name(__VA_ARGS__, 28); break; \
case 29: result = func_name(__VA_ARGS__, 29); break; \
case 30: result = func_name(__VA_ARGS__, 30); break; \
case 31: result = func_name(__VA_ARGS__, 31); break; \
default: result = default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_, for imm in [0, 64). */
#define SIMDE_CONSTIFY_64_(func_name, result, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: result = func_name(__VA_ARGS__, 0); break; \
case 1: result = func_name(__VA_ARGS__, 1); break; \
case 2: result = func_name(__VA_ARGS__, 2); break; \
case 3: result = func_name(__VA_ARGS__, 3); break; \
case 4: result = func_name(__VA_ARGS__, 4); break; \
case 5: result = func_name(__VA_ARGS__, 5); break; \
case 6: result = func_name(__VA_ARGS__, 6); break; \
case 7: result = func_name(__VA_ARGS__, 7); break; \
case 8: result = func_name(__VA_ARGS__, 8); break; \
case 9: result = func_name(__VA_ARGS__, 9); break; \
case 10: result = func_name(__VA_ARGS__, 10); break; \
case 11: result = func_name(__VA_ARGS__, 11); break; \
case 12: result = func_name(__VA_ARGS__, 12); break; \
case 13: result = func_name(__VA_ARGS__, 13); break; \
case 14: result = func_name(__VA_ARGS__, 14); break; \
case 15: result = func_name(__VA_ARGS__, 15); break; \
case 16: result = func_name(__VA_ARGS__, 16); break; \
case 17: result = func_name(__VA_ARGS__, 17); break; \
case 18: result = func_name(__VA_ARGS__, 18); break; \
case 19: result = func_name(__VA_ARGS__, 19); break; \
case 20: result = func_name(__VA_ARGS__, 20); break; \
case 21: result = func_name(__VA_ARGS__, 21); break; \
case 22: result = func_name(__VA_ARGS__, 22); break; \
case 23: result = func_name(__VA_ARGS__, 23); break; \
case 24: result = func_name(__VA_ARGS__, 24); break; \
case 25: result = func_name(__VA_ARGS__, 25); break; \
case 26: result = func_name(__VA_ARGS__, 26); break; \
case 27: result = func_name(__VA_ARGS__, 27); break; \
case 28: result = func_name(__VA_ARGS__, 28); break; \
case 29: result = func_name(__VA_ARGS__, 29); break; \
case 30: result = func_name(__VA_ARGS__, 30); break; \
case 31: result = func_name(__VA_ARGS__, 31); break; \
case 32: result = func_name(__VA_ARGS__, 32); break; \
case 33: result = func_name(__VA_ARGS__, 33); break; \
case 34: result = func_name(__VA_ARGS__, 34); break; \
case 35: result = func_name(__VA_ARGS__, 35); break; \
case 36: result = func_name(__VA_ARGS__, 36); break; \
case 37: result = func_name(__VA_ARGS__, 37); break; \
case 38: result = func_name(__VA_ARGS__, 38); break; \
case 39: result = func_name(__VA_ARGS__, 39); break; \
case 40: result = func_name(__VA_ARGS__, 40); break; \
case 41: result = func_name(__VA_ARGS__, 41); break; \
case 42: result = func_name(__VA_ARGS__, 42); break; \
case 43: result = func_name(__VA_ARGS__, 43); break; \
case 44: result = func_name(__VA_ARGS__, 44); break; \
case 45: result = func_name(__VA_ARGS__, 45); break; \
case 46: result = func_name(__VA_ARGS__, 46); break; \
case 47: result = func_name(__VA_ARGS__, 47); break; \
case 48: result = func_name(__VA_ARGS__, 48); break; \
case 49: result = func_name(__VA_ARGS__, 49); break; \
case 50: result = func_name(__VA_ARGS__, 50); break; \
case 51: result = func_name(__VA_ARGS__, 51); break; \
case 52: result = func_name(__VA_ARGS__, 52); break; \
case 53: result = func_name(__VA_ARGS__, 53); break; \
case 54: result = func_name(__VA_ARGS__, 54); break; \
case 55: result = func_name(__VA_ARGS__, 55); break; \
case 56: result = func_name(__VA_ARGS__, 56); break; \
case 57: result = func_name(__VA_ARGS__, 57); break; \
case 58: result = func_name(__VA_ARGS__, 58); break; \
case 59: result = func_name(__VA_ARGS__, 59); break; \
case 60: result = func_name(__VA_ARGS__, 60); break; \
case 61: result = func_name(__VA_ARGS__, 61); break; \
case 62: result = func_name(__VA_ARGS__, 62); break; \
case 63: result = func_name(__VA_ARGS__, 63); break; \
default: result = default_case; break; \
} \
} while (0)
/* SIMDE_CONSTIFY_N_NO_RESULT_(func_name, default_case, imm, ...)
 *
 * Like SIMDE_CONSTIFY_N_, but for functions whose return value is
 * discarded (e.g. void functions): calls func_name(__VA_ARGS__, imm)
 * with imm spelled as a literal for each imm in [0, N); out-of-range
 * values execute `default_case` instead. */
#define SIMDE_CONSTIFY_2_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
default: default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_NO_RESULT_, for imm in [0, 4). */
#define SIMDE_CONSTIFY_4_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
case 2: func_name(__VA_ARGS__, 2); break; \
case 3: func_name(__VA_ARGS__, 3); break; \
default: default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_NO_RESULT_, for imm in [0, 8). */
#define SIMDE_CONSTIFY_8_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
case 2: func_name(__VA_ARGS__, 2); break; \
case 3: func_name(__VA_ARGS__, 3); break; \
case 4: func_name(__VA_ARGS__, 4); break; \
case 5: func_name(__VA_ARGS__, 5); break; \
case 6: func_name(__VA_ARGS__, 6); break; \
case 7: func_name(__VA_ARGS__, 7); break; \
default: default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_NO_RESULT_, for imm in [0, 16). */
#define SIMDE_CONSTIFY_16_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
case 2: func_name(__VA_ARGS__, 2); break; \
case 3: func_name(__VA_ARGS__, 3); break; \
case 4: func_name(__VA_ARGS__, 4); break; \
case 5: func_name(__VA_ARGS__, 5); break; \
case 6: func_name(__VA_ARGS__, 6); break; \
case 7: func_name(__VA_ARGS__, 7); break; \
case 8: func_name(__VA_ARGS__, 8); break; \
case 9: func_name(__VA_ARGS__, 9); break; \
case 10: func_name(__VA_ARGS__, 10); break; \
case 11: func_name(__VA_ARGS__, 11); break; \
case 12: func_name(__VA_ARGS__, 12); break; \
case 13: func_name(__VA_ARGS__, 13); break; \
case 14: func_name(__VA_ARGS__, 14); break; \
case 15: func_name(__VA_ARGS__, 15); break; \
default: default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_NO_RESULT_, for imm in [0, 32). */
#define SIMDE_CONSTIFY_32_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
case 2: func_name(__VA_ARGS__, 2); break; \
case 3: func_name(__VA_ARGS__, 3); break; \
case 4: func_name(__VA_ARGS__, 4); break; \
case 5: func_name(__VA_ARGS__, 5); break; \
case 6: func_name(__VA_ARGS__, 6); break; \
case 7: func_name(__VA_ARGS__, 7); break; \
case 8: func_name(__VA_ARGS__, 8); break; \
case 9: func_name(__VA_ARGS__, 9); break; \
case 10: func_name(__VA_ARGS__, 10); break; \
case 11: func_name(__VA_ARGS__, 11); break; \
case 12: func_name(__VA_ARGS__, 12); break; \
case 13: func_name(__VA_ARGS__, 13); break; \
case 14: func_name(__VA_ARGS__, 14); break; \
case 15: func_name(__VA_ARGS__, 15); break; \
case 16: func_name(__VA_ARGS__, 16); break; \
case 17: func_name(__VA_ARGS__, 17); break; \
case 18: func_name(__VA_ARGS__, 18); break; \
case 19: func_name(__VA_ARGS__, 19); break; \
case 20: func_name(__VA_ARGS__, 20); break; \
case 21: func_name(__VA_ARGS__, 21); break; \
case 22: func_name(__VA_ARGS__, 22); break; \
case 23: func_name(__VA_ARGS__, 23); break; \
case 24: func_name(__VA_ARGS__, 24); break; \
case 25: func_name(__VA_ARGS__, 25); break; \
case 26: func_name(__VA_ARGS__, 26); break; \
case 27: func_name(__VA_ARGS__, 27); break; \
case 28: func_name(__VA_ARGS__, 28); break; \
case 29: func_name(__VA_ARGS__, 29); break; \
case 30: func_name(__VA_ARGS__, 30); break; \
case 31: func_name(__VA_ARGS__, 31); break; \
default: default_case; break; \
} \
} while (0)
/* As SIMDE_CONSTIFY_2_NO_RESULT_, for imm in [0, 64). */
#define SIMDE_CONSTIFY_64_NO_RESULT_(func_name, default_case, imm, ...) \
do { \
switch(imm) { \
case 0: func_name(__VA_ARGS__, 0); break; \
case 1: func_name(__VA_ARGS__, 1); break; \
case 2: func_name(__VA_ARGS__, 2); break; \
case 3: func_name(__VA_ARGS__, 3); break; \
case 4: func_name(__VA_ARGS__, 4); break; \
case 5: func_name(__VA_ARGS__, 5); break; \
case 6: func_name(__VA_ARGS__, 6); break; \
case 7: func_name(__VA_ARGS__, 7); break; \
case 8: func_name(__VA_ARGS__, 8); break; \
case 9: func_name(__VA_ARGS__, 9); break; \
case 10: func_name(__VA_ARGS__, 10); break; \
case 11: func_name(__VA_ARGS__, 11); break; \
case 12: func_name(__VA_ARGS__, 12); break; \
case 13: func_name(__VA_ARGS__, 13); break; \
case 14: func_name(__VA_ARGS__, 14); break; \
case 15: func_name(__VA_ARGS__, 15); break; \
case 16: func_name(__VA_ARGS__, 16); break; \
case 17: func_name(__VA_ARGS__, 17); break; \
case 18: func_name(__VA_ARGS__, 18); break; \
case 19: func_name(__VA_ARGS__, 19); break; \
case 20: func_name(__VA_ARGS__, 20); break; \
case 21: func_name(__VA_ARGS__, 21); break; \
case 22: func_name(__VA_ARGS__, 22); break; \
case 23: func_name(__VA_ARGS__, 23); break; \
case 24: func_name(__VA_ARGS__, 24); break; \
case 25: func_name(__VA_ARGS__, 25); break; \
case 26: func_name(__VA_ARGS__, 26); break; \
case 27: func_name(__VA_ARGS__, 27); break; \
case 28: func_name(__VA_ARGS__, 28); break; \
case 29: func_name(__VA_ARGS__, 29); break; \
case 30: func_name(__VA_ARGS__, 30); break; \
case 31: func_name(__VA_ARGS__, 31); break; \
case 32: func_name(__VA_ARGS__, 32); break; \
case 33: func_name(__VA_ARGS__, 33); break; \
case 34: func_name(__VA_ARGS__, 34); break; \
case 35: func_name(__VA_ARGS__, 35); break; \
case 36: func_name(__VA_ARGS__, 36); break; \
case 37: func_name(__VA_ARGS__, 37); break; \
case 38: func_name(__VA_ARGS__, 38); break; \
case 39: func_name(__VA_ARGS__, 39); break; \
case 40: func_name(__VA_ARGS__, 40); break; \
case 41: func_name(__VA_ARGS__, 41); break; \
case 42: func_name(__VA_ARGS__, 42); break; \
case 43: func_name(__VA_ARGS__, 43); break; \
case 44: func_name(__VA_ARGS__, 44); break; \
case 45: func_name(__VA_ARGS__, 45); break; \
case 46: func_name(__VA_ARGS__, 46); break; \
case 47: func_name(__VA_ARGS__, 47); break; \
case 48: func_name(__VA_ARGS__, 48); break; \
case 49: func_name(__VA_ARGS__, 49); break; \
case 50: func_name(__VA_ARGS__, 50); break; \
case 51: func_name(__VA_ARGS__, 51); break; \
case 52: func_name(__VA_ARGS__, 52); break; \
case 53: func_name(__VA_ARGS__, 53); break; \
case 54: func_name(__VA_ARGS__, 54); break; \
case 55: func_name(__VA_ARGS__, 55); break; \
case 56: func_name(__VA_ARGS__, 56); break; \
case 57: func_name(__VA_ARGS__, 57); break; \
case 58: func_name(__VA_ARGS__, 58); break; \
case 59: func_name(__VA_ARGS__, 59); break; \
case 60: func_name(__VA_ARGS__, 60); break; \
case 61: func_name(__VA_ARGS__, 61); break; \
case 62: func_name(__VA_ARGS__, 62); break; \
case 63: func_name(__VA_ARGS__, 63); break; \
default: default_case; break; \
} \
} while (0)
HEDLEY_DIAGNOSTIC_POP
#endif
/* :: End simde-constify.h :: */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin simde-align.h :: */
/* Alignment
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* <https://creativecommons.org/publicdomain/zero/1.0/>
*
* SPDX-License-Identifier: CC0-1.0
*
**********************************************************************
*
* This is portability layer which should help iron out some
* differences across various compilers, as well as various versions of
* C and C++.
*
* It was originally developed for SIMD Everywhere
* (<https://github.com/simd-everywhere/simde>), but since its only
* dependency is Hedley (<https://nemequ.github.io/hedley>, also CC0)
* it can easily be used in other projects, so please feel free to do
* so.
*
* If you do use this in your project, please keep a link to SIMDe in
* your code to remind you where to report any bugs and/or check for
* updated versions.
*
* # API Overview
*
* The API has several parts, and most macros have a few variations.
* There are APIs for declaring aligned fields/variables, optimization
* hints, and run-time alignment checks.
*
* Briefly, macros ending with "_TO" take numeric values and are great
* when you know the value you would like to use. Macros ending with
* "_LIKE", on the other hand, accept a type and are used when you want
* to use the alignment of a type instead of hardcoding a value.
*
* Documentation for each section of the API is inline.
*
* True to form, MSVC is the main problem and imposes several
* limitations on the effectiveness of the APIs. Detailed descriptions
* of the limitations of each macro are inline, but in general:
*
* * On C11+ or C++11+ code written using this API will work. The
* ASSUME macros may or may not generate a hint to the compiler, but
* that is only an optimization issue and will not actually cause
* failures.
* * If you're using pretty much any compiler other than MSVC,
* everything should basically work as well as in C11/C++11.
*/
#if !defined(SIMDE_ALIGN_H)
#define SIMDE_ALIGN_H
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* I know this seems a little silly, but some non-hosted compilers
* don't have stddef.h, so we try to accommodate them. */
/* Pick a size_t-equivalent type without requiring a hosted stddef.h
 * when the compiler exposes a builtin type macro. */
#if !defined(SIMDE_ALIGN_SIZE_T_)
#if defined(__SIZE_TYPE__)
#define SIMDE_ALIGN_SIZE_T_ __SIZE_TYPE__
#elif defined(__SIZE_T_TYPE__)
/* Bug fix: this branch previously expanded to __SIZE_TYPE__, which is
 * known to be undefined here (the #if above just failed); use the macro
 * that was actually tested, mirroring the SIMDE_ALIGN_INTPTR_T_ ladder. */
#define SIMDE_ALIGN_SIZE_T_ __SIZE_T_TYPE__
#elif defined(__cplusplus)
#include <cstddef>
#define SIMDE_ALIGN_SIZE_T_ size_t
#else
#include <stddef.h>
#define SIMDE_ALIGN_SIZE_T_ size_t
#endif
#endif
#if !defined(SIMDE_ALIGN_INTPTR_T_)
#if defined(__INTPTR_TYPE__)
#define SIMDE_ALIGN_INTPTR_T_ __INTPTR_TYPE__
#elif defined(__PTRDIFF_TYPE__)
#define SIMDE_ALIGN_INTPTR_T_ __PTRDIFF_TYPE__
#elif defined(__PTRDIFF_T_TYPE__)
#define SIMDE_ALIGN_INTPTR_T_ __PTRDIFF_T_TYPE__
#elif defined(__cplusplus)
#include <cstddef>
#define SIMDE_ALIGN_INTPTR_T_ ptrdiff_t
#else
#include <stddef.h>
#define SIMDE_ALIGN_INTPTR_T_ ptrdiff_t
#endif
#endif
#if defined(SIMDE_ALIGN_DEBUG)
#if defined(__cplusplus)
#include <cstdio>
#else
#include <stdio.h>
#endif
#endif
/* SIMDE_ALIGN_OF(Type)
*
* The SIMDE_ALIGN_OF macro works like alignof, or _Alignof, or
* __alignof, or __alignof__, or __ALIGNOF__, depending on the compiler.
* It isn't defined everywhere (only when the compiler has some alignof-
* like feature we can use to implement it), but it should work in most
* modern compilers, as well as C11 and C++11.
*
* If we can't find an implementation for SIMDE_ALIGN_OF then the macro
* will not be defined, so if you can handle that situation sensibly
* you may need to sprinkle some ifdefs into your code.
*/
#if \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
(0 && HEDLEY_HAS_FEATURE(c_alignof))
#define SIMDE_ALIGN_OF(Type) _Alignof(Type)
#elif \
(defined(__cplusplus) && (__cplusplus >= 201103L)) || \
(0 && HEDLEY_HAS_FEATURE(cxx_alignof))
#define SIMDE_ALIGN_OF(Type) alignof(Type)
#elif \
HEDLEY_GCC_VERSION_CHECK(2,95,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,24) || \
HEDLEY_PGI_VERSION_CHECK(19,10,0) || \
HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \
defined(__IBM__ALIGNOF__) || \
defined(__clang__)
#define SIMDE_ALIGN_OF(Type) __alignof__(Type)
#elif \
HEDLEY_IAR_VERSION_CHECK(8,40,0)
#define SIMDE_ALIGN_OF(Type) __ALIGNOF__(Type)
#elif \
HEDLEY_MSVC_VERSION_CHECK(19,0,0)
/* Probably goes back much further, but MS takes down their old docs.
* If you can verify that this works in earlier versions please let
* me know! */
#define SIMDE_ALIGN_OF(Type) __alignof(Type)
#endif
/* SIMDE_ALIGN_MAXIMUM:
*
* This is the maximum alignment that the compiler supports. You can
* define the value prior to including SIMDe if necessary, but in that
* case *please* submit an issue so we can add the platform to the
* detection code.
*
* Most compilers are okay with types which are aligned beyond what
* they think is the maximum, as long as the alignment is a power
* of two. Older versions of MSVC is the exception, so we need to cap
* the alignment requests at values that the implementation supports.
*
* XL C/C++ will accept values larger than 16 (which is the alignment
* of an AltiVec vector), but will not reliably align to the larger
* value, so we cap the value at 16 there.
*
* If the compiler accepts any power-of-two value within reason then
* this macro should be left undefined, and the SIMDE_ALIGN_CAP
* macro will just return the value passed to it. */
#if !defined(SIMDE_ALIGN_MAXIMUM)
#if defined(HEDLEY_MSVC_VERSION)
#if HEDLEY_MSVC_VERSION_CHECK(19, 16, 0)
// Visual studio 2017 and newer does not need a max
#else
#if defined(_M_IX86) || defined(_M_AMD64)
#if HEDLEY_MSVC_VERSION_CHECK(19,14,0)
#define SIMDE_ALIGN_PLATFORM_MAXIMUM 64
#elif HEDLEY_MSVC_VERSION_CHECK(16,0,0)
/* VS 2010 is really a guess based on Wikipedia; if anyone can
* test with old VS versions I'd really appreciate it. */
#define SIMDE_ALIGN_PLATFORM_MAXIMUM 32
#else
#define SIMDE_ALIGN_PLATFORM_MAXIMUM 16
#endif
#elif defined(_M_ARM) || defined(_M_ARM64)
#define SIMDE_ALIGN_PLATFORM_MAXIMUM 8
#endif
#endif
#elif defined(HEDLEY_IBM_VERSION)
#define SIMDE_ALIGN_PLATFORM_MAXIMUM 16
#endif
#endif
/* You can mostly ignore these; they're intended for internal use.
* If you do need to use them please let me know; if they fulfill
* a common use case I'll probably drop the trailing underscore
* and make them part of the public API. */
#if defined(SIMDE_ALIGN_PLATFORM_MAXIMUM)
#if SIMDE_ALIGN_PLATFORM_MAXIMUM >= 64
#define SIMDE_ALIGN_64_ 64
#define SIMDE_ALIGN_32_ 32
#define SIMDE_ALIGN_16_ 16
#define SIMDE_ALIGN_8_ 8
#elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 32
#define SIMDE_ALIGN_64_ 32
#define SIMDE_ALIGN_32_ 32
#define SIMDE_ALIGN_16_ 16
#define SIMDE_ALIGN_8_ 8
#elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 16
#define SIMDE_ALIGN_64_ 16
#define SIMDE_ALIGN_32_ 16
#define SIMDE_ALIGN_16_ 16
#define SIMDE_ALIGN_8_ 8
#elif SIMDE_ALIGN_PLATFORM_MAXIMUM >= 8
#define SIMDE_ALIGN_64_ 8
#define SIMDE_ALIGN_32_ 8
#define SIMDE_ALIGN_16_ 8
#define SIMDE_ALIGN_8_ 8
#else
#error Max alignment expected to be >= 8
#endif
#else
#define SIMDE_ALIGN_64_ 64
#define SIMDE_ALIGN_32_ 32
#define SIMDE_ALIGN_16_ 16
#define SIMDE_ALIGN_8_ 8
#endif
/**
 * SIMDE_ALIGN_CAP(Alignment)
 *
 * Returns the minimum of Alignment or the detected platform maximum
 * (SIMDE_ALIGN_PLATFORM_MAXIMUM); a pass-through when no cap applies.
 */
/* Bug fix: the guard previously tested SIMDE_ALIGN_MAXIMUM, but the
 * detection code above only ever defines SIMDE_ALIGN_PLATFORM_MAXIMUM
 * (and the SIMDE_ALIGN_64_/32_/16_/8_ ladder guards on the same macro),
 * so capping never took effect on platforms that need it. */
#if defined(SIMDE_ALIGN_PLATFORM_MAXIMUM)
#define SIMDE_ALIGN_CAP(Alignment) (((Alignment) < (SIMDE_ALIGN_PLATFORM_MAXIMUM)) ? (Alignment) : (SIMDE_ALIGN_PLATFORM_MAXIMUM))
#else
#define SIMDE_ALIGN_CAP(Alignment) (Alignment)
#endif
/* SIMDE_ALIGN_TO(Alignment)
*
* SIMDE_ALIGN_TO is used to declare types or variables. It basically
* maps to the align attribute in most compilers, the align declspec
* in MSVC, or _Alignas/alignas in C11/C++11.
*
* Example:
*
* struct i32x4 {
* SIMDE_ALIGN_TO(16) int32_t values[4];
* }
*
* Limitations:
*
* MSVC requires that the Alignment parameter be numeric; you can't do
* something like `SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(int))`. This is
* unfortunate because that's really how the LIKE macros are
* implemented, and I am not aware of a way to get anything like this
* to work without using the C11/C++11 keywords.
*
* It also means that we can't use SIMDE_ALIGN_CAP to limit the
* alignment to the value specified, which MSVC also requires, so on
* MSVC you should use the `SIMDE_ALIGN_TO_8/16/32/64` macros instead.
* They work like `SIMDE_ALIGN_TO(SIMDE_ALIGN_CAP(Alignment))` would,
* but should be safe to use on MSVC.
*
* All this is to say that, if you want your code to work on MSVC, you
* should use the SIMDE_ALIGN_TO_8/16/32/64 macros below instead of
* SIMDE_ALIGN_TO(8/16/32/64).
*/
#if \
HEDLEY_HAS_ATTRIBUTE(aligned) || \
HEDLEY_GCC_VERSION_CHECK(2,95,0) || \
HEDLEY_CRAY_VERSION_CHECK(8,4,0) || \
HEDLEY_IBM_VERSION_CHECK(11,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_PGI_VERSION_CHECK(19,4,0) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,24) || \
HEDLEY_TI_ARMCL_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CL2000_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CL430_VERSION_CHECK(16,9,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
#define SIMDE_ALIGN_TO(Alignment) __attribute__((__aligned__(SIMDE_ALIGN_CAP(Alignment))))
#elif \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))
#define SIMDE_ALIGN_TO(Alignment) _Alignas(SIMDE_ALIGN_CAP(Alignment))
#elif \
(defined(__cplusplus) && (__cplusplus >= 201103L))
#define SIMDE_ALIGN_TO(Alignment) alignas(SIMDE_ALIGN_CAP(Alignment))
#elif \
defined(HEDLEY_MSVC_VERSION)
#define SIMDE_ALIGN_TO(Alignment) __declspec(align(Alignment))
/* Unfortunately MSVC can't handle __declspec(align(__alignof(Type)));
* the alignment passed to the declspec has to be an integer. */
#define SIMDE_ALIGN_OF_UNUSABLE_FOR_LIKE
#endif
#define SIMDE_ALIGN_TO_64 SIMDE_ALIGN_TO(SIMDE_ALIGN_64_)
#define SIMDE_ALIGN_TO_32 SIMDE_ALIGN_TO(SIMDE_ALIGN_32_)
#define SIMDE_ALIGN_TO_16 SIMDE_ALIGN_TO(SIMDE_ALIGN_16_)
#define SIMDE_ALIGN_TO_8 SIMDE_ALIGN_TO(SIMDE_ALIGN_8_)
/* SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment)
*
* SIMDE_ALIGN_ASSUME_TO is semantically similar to C++20's
* std::assume_aligned, or __builtin_assume_aligned. It tells the
* compiler to assume that the provided pointer is aligned to an
* `Alignment`-byte boundary.
*
* If you define SIMDE_ALIGN_DEBUG prior to including this header then
* SIMDE_ALIGN_ASSUME_TO will turn into a runtime check. We don't
* integrate with NDEBUG in this header, but it may be a good idea to
* put something like this in your code:
*
* #if !defined(NDEBUG)
* #define SIMDE_ALIGN_DEBUG
* #endif
* #include <.../simde-align.h>
*/
#if \
HEDLEY_HAS_BUILTIN(__builtin_assume_aligned) || \
HEDLEY_GCC_VERSION_CHECK(4,7,0)
#define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) \
HEDLEY_REINTERPRET_CAST(__typeof__(Pointer), __builtin_assume_aligned(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), Alignment))
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
/* ICC: bind the pointer to a temporary so __assume_aligned can annotate
 * it, then yield that temporary as the value of the statement expression.
 * Fix: the temporary's type must come from the macro parameter `Pointer`;
 * the previous `__typeof__(v)` referenced an undeclared identifier. */
#define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) (__extension__ ({ \
    __typeof__(Pointer) simde_assume_aligned_t_ = (Pointer); \
    __assume_aligned(simde_assume_aligned_t_, Alignment); \
    simde_assume_aligned_t_; \
  }))
#elif defined(__cplusplus) && (__cplusplus > 201703L)
#include <memory>
#define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) std::assume_aligned<Alignment>(Pointer)
#else
/* Fallback for compilers with no assume-aligned builtin: an always-inline
 * helper that promises the optimizer `ptr` is aligned to `alignment`.
 * There is no runtime verification here; a false promise is UB. */
#if defined(__cplusplus)
/* C++: templated so the returned pointer keeps its original type. */
template<typename T> HEDLEY_ALWAYS_INLINE static T* simde_align_assume_to_unchecked(T* ptr, const size_t alignment)
#else
/* C: operates on void*; the wrapper macro below handles the casting. */
HEDLEY_ALWAYS_INLINE static void* simde_align_assume_to_unchecked(void* ptr, const size_t alignment)
#endif
{
/* Assert (to the optimizer only) that the address is a multiple of the
 * capped alignment. */
HEDLEY_ASSUME((HEDLEY_REINTERPRET_CAST(size_t, (ptr)) % SIMDE_ALIGN_CAP(alignment)) == 0);
return ptr;
}
#if defined(__cplusplus)
#define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) simde_align_assume_to_unchecked((Pointer), (Alignment))
#else
/* In C there is no templating to preserve the pointee type, so any pointer
 * is funneled through const void* -> void*, dropping const on the way in. */
#define SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment) simde_align_assume_to_unchecked(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), (Alignment))
#endif
#endif
#if !defined(SIMDE_ALIGN_DEBUG)
#define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) SIMDE_ALIGN_ASSUME_TO_UNCHECKED(Pointer, Alignment)
#else
#include <stdio.h>
#if defined(__cplusplus)
/* SIMDE_ALIGN_DEBUG variant: instead of promising alignment to the
 * optimizer, verify it at runtime and report violations on stderr. */
template<typename T>
static HEDLEY_ALWAYS_INLINE
T*
simde_align_assume_to_checked_uncapped(T* ptr, const size_t alignment, const char* file, int line, const char* ptrname)
#else
static HEDLEY_ALWAYS_INLINE
void*
simde_align_assume_to_checked_uncapped(void* ptr, const size_t alignment, const char* file, int line, const char* ptrname)
#endif
{
/* "uncapped" in the name refers to the caller-supplied alignment; the
 * check itself applies SIMDE_ALIGN_CAP before taking the modulus. */
if (HEDLEY_UNLIKELY((HEDLEY_REINTERPRET_CAST(SIMDE_ALIGN_INTPTR_T_, (ptr)) % HEDLEY_STATIC_CAST(SIMDE_ALIGN_INTPTR_T_, SIMDE_ALIGN_CAP(alignment))) != 0)) {
/* Diagnostic only; the pointer is still returned unchanged below. */
fprintf(stderr, "%s:%d: alignment check failed for `%s' (%p %% %u == %u)\n",
file, line, ptrname, HEDLEY_REINTERPRET_CAST(const void*, ptr),
HEDLEY_STATIC_CAST(unsigned int, SIMDE_ALIGN_CAP(alignment)),
HEDLEY_STATIC_CAST(unsigned int, HEDLEY_REINTERPRET_CAST(SIMDE_ALIGN_INTPTR_T_, (ptr)) % HEDLEY_STATIC_CAST(SIMDE_ALIGN_INTPTR_T_, SIMDE_ALIGN_CAP(alignment))));
}
return ptr;
}
#if defined(__cplusplus)
#define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) simde_align_assume_to_checked_uncapped((Pointer), (Alignment), __FILE__, __LINE__, #Pointer)
#else
#define SIMDE_ALIGN_ASSUME_TO(Pointer, Alignment) simde_align_assume_to_checked_uncapped(HEDLEY_CONST_CAST(void*, HEDLEY_REINTERPRET_CAST(const void*, Pointer)), (Alignment), __FILE__, __LINE__, #Pointer)
#endif
#endif
/* SIMDE_ALIGN_LIKE(Type)
* SIMDE_ALIGN_LIKE_#(Type)
*
* The SIMDE_ALIGN_LIKE macros are similar to the SIMDE_ALIGN_TO macros
* except instead of an integer they take a type; basically, it's just
* a more convenient way to do something like:
*
* SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(Type))
*
* The versions with a numeric suffix will fall back on using a numeric
* value in the event we can't use SIMDE_ALIGN_OF(Type). This is
* mainly for MSVC, where __declspec(align()) can't handle anything
* other than hard-coded numeric values.
*/
#if defined(SIMDE_ALIGN_OF) && defined(SIMDE_ALIGN_TO) && !defined(SIMDE_ALIGN_OF_UNUSABLE_FOR_LIKE)
#define SIMDE_ALIGN_LIKE(Type) SIMDE_ALIGN_TO(SIMDE_ALIGN_OF(Type))
#define SIMDE_ALIGN_LIKE_64(Type) SIMDE_ALIGN_LIKE(Type)
#define SIMDE_ALIGN_LIKE_32(Type) SIMDE_ALIGN_LIKE(Type)
#define SIMDE_ALIGN_LIKE_16(Type) SIMDE_ALIGN_LIKE(Type)
#define SIMDE_ALIGN_LIKE_8(Type) SIMDE_ALIGN_LIKE(Type)
#else
#define SIMDE_ALIGN_LIKE_64(Type) SIMDE_ALIGN_TO_64
#define SIMDE_ALIGN_LIKE_32(Type) SIMDE_ALIGN_TO_32
#define SIMDE_ALIGN_LIKE_16(Type) SIMDE_ALIGN_TO_16
#define SIMDE_ALIGN_LIKE_8(Type) SIMDE_ALIGN_TO_8
#endif
/* SIMDE_ALIGN_ASSUME_LIKE(Pointer, Type)
*
 * This is similar to SIMDE_ALIGN_ASSUME_TO, except that it takes a
* type instead of a numeric value. */
#if defined(SIMDE_ALIGN_OF) && defined(SIMDE_ALIGN_ASSUME_TO)
#define SIMDE_ALIGN_ASSUME_LIKE(Pointer, Type) SIMDE_ALIGN_ASSUME_TO(Pointer, SIMDE_ALIGN_OF(Type))
#endif
/* SIMDE_ALIGN_CAST(Type, Pointer)
*
* SIMDE_ALIGN_CAST is like C++'s reinterpret_cast, but it will try
* to silence warnings that some compilers may produce if you try
* to assign to a type with increased alignment requirements.
*
* Note that it does *not* actually attempt to tell the compiler that
* the pointer is aligned like the destination should be; that's the
* job of the next macro. This macro is necessary for stupid APIs
* like _mm_loadu_si128 where the input is a __m128i* but the function
* is specifically for data which isn't necessarily aligned to
* _Alignof(__m128i).
*/
#if HEDLEY_HAS_WARNING("-Wcast-align") || defined(__clang__) || HEDLEY_GCC_VERSION_CHECK(3,4,0)
#define SIMDE_ALIGN_CAST(Type, Pointer) (__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("GCC diagnostic ignored \"-Wcast-align\"") \
Type simde_r_ = HEDLEY_REINTERPRET_CAST(Type, Pointer); \
HEDLEY_DIAGNOSTIC_POP \
simde_r_; \
}))
#else
#define SIMDE_ALIGN_CAST(Type, Pointer) HEDLEY_REINTERPRET_CAST(Type, Pointer)
#endif
/* SIMDE_ALIGN_ASSUME_CAST(Type, Pointer)
*
* This is sort of like a combination of a reinterpret_cast and a
* SIMDE_ALIGN_ASSUME_LIKE. It uses SIMDE_ALIGN_ASSUME_LIKE to tell
* the compiler that the pointer is aligned like the specified type
* and casts the pointer to the specified type while suppressing any
* warnings from the compiler about casting to a type with greater
* alignment requirements.
*/
#define SIMDE_ALIGN_ASSUME_CAST(Type, Pointer) SIMDE_ALIGN_ASSUME_LIKE(SIMDE_ALIGN_CAST(Type, Pointer), Type)
#endif /* !defined(SIMDE_ALIGN_H) */
/* :: End simde-align.h :: */
/* In some situations, SIMDe has to make large performance sacrifices
* for small increases in how faithfully it reproduces an API, but
* only a relatively small number of users will actually need the API
* to be completely accurate. The SIMDE_FAST_* options can be used to
* disable these trade-offs.
*
* They can be enabled by passing -DSIMDE_FAST_MATH to the compiler, or
* the individual defines (e.g., -DSIMDE_FAST_NANS) if you only want to
* enable some optimizations. Using -ffast-math and/or
* -ffinite-math-only will also enable the relevant options. If you
* don't want that you can pass -DSIMDE_NO_FAST_* to disable them. */
/* Most programs avoid NaNs by never passing values which can result in
* a NaN; for example, if you only pass non-negative values to the sqrt
* functions, it won't generate a NaN. On some platforms, similar
* functions handle NaNs differently; for example, the _mm_min_ps SSE
* function will return 0.0 if you pass it (0.0, NaN), but the NEON
* vminq_f32 function will return NaN. Making them behave like one
* another is expensive; it requires generating a mask of all lanes
* with NaNs, then performing the operation (e.g., vminq_f32), then
* blending together the result with another vector using the mask.
*
* If you don't want SIMDe to worry about the differences between how
* NaNs are handled on the two platforms, define this (or pass
* -ffinite-math-only) */
#if !defined(SIMDE_FAST_MATH) && !defined(SIMDE_NO_FAST_MATH) && defined(__FAST_MATH__)
#define SIMDE_FAST_MATH
#endif
#if !defined(SIMDE_FAST_NANS) && !defined(SIMDE_NO_FAST_NANS)
#if defined(SIMDE_FAST_MATH)
#define SIMDE_FAST_NANS
#elif defined(__FINITE_MATH_ONLY__)
#if __FINITE_MATH_ONLY__
#define SIMDE_FAST_NANS
#endif
#endif
#endif
/* Many functions are defined as using the current rounding mode
* (i.e., the SIMD version of fegetround()) when converting to
* an integer. For example, _mm_cvtpd_epi32. Unfortunately,
* on some platforms (such as ARMv8+ where round-to-nearest is
* always used, regardless of the FPSCR register) this means we
* have to first query the current rounding mode, then choose
 * the proper function (round, ceil, floor, etc.) */
#if !defined(SIMDE_FAST_ROUND_MODE) && !defined(SIMDE_NO_FAST_ROUND_MODE) && defined(SIMDE_FAST_MATH)
#define SIMDE_FAST_ROUND_MODE
#endif
/* This controls how ties are rounded. For example, does 10.5 round to
* 10 or 11? IEEE 754 specifies round-towards-even, but ARMv7 (for
* example) doesn't support it and it must be emulated (which is rather
* slow). If you're okay with just using the default for whatever arch
* you're on, you should definitely define this.
*
* Note that we don't use this macro to avoid correct implementations
* in functions which are explicitly about rounding (such as vrnd* on
* NEON, _mm_round_* on x86, etc.); it is only used for code where
* rounding is a component in another function, and even then it isn't
* usually a problem since such functions will use the current rounding
* mode. */
#if !defined(SIMDE_FAST_ROUND_TIES) && !defined(SIMDE_NO_FAST_ROUND_TIES) && defined(SIMDE_FAST_MATH)
#define SIMDE_FAST_ROUND_TIES
#endif
/* For functions which convert from one type to another (mostly from
* floating point to integer types), sometimes we need to do a range
* check and potentially return a different result if the value
* falls outside that range. Skipping this check can provide a
* performance boost, at the expense of faithfulness to the API we're
* emulating. */
#if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_NO_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_MATH)
#define SIMDE_FAST_CONVERSION_RANGE
#endif
/* Due to differences across platforms, sometimes it can be much
* faster for us to allow spurious floating point exceptions,
 * or to not generate them when we should. */
#if !defined(SIMDE_FAST_EXCEPTIONS) && !defined(SIMDE_NO_FAST_EXCEPTIONS) && defined(SIMDE_FAST_MATH)
#define SIMDE_FAST_EXCEPTIONS
#endif
#if \
HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
(HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \
HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define SIMDE_CHECK_CONSTANT_(expr) (__builtin_constant_p(expr))
#elif defined(__cplusplus) && (__cplusplus > 201703L)
#include <type_traits>
#define SIMDE_CHECK_CONSTANT_(expr) (std::is_constant_evaluated())
#endif
#if !defined(SIMDE_NO_CHECK_IMMEDIATE_CONSTANT)
#if defined(SIMDE_CHECK_CONSTANT_) && \
SIMDE_DETECT_CLANG_VERSION_CHECK(9,0,0) && \
(!defined(__apple_build_version__) || ((__apple_build_version__ < 11000000) || (__apple_build_version__ >= 12000000)))
#define SIMDE_REQUIRE_CONSTANT(arg) HEDLEY_REQUIRE_MSG(SIMDE_CHECK_CONSTANT_(arg), "`" #arg "' must be constant")
#else
#define SIMDE_REQUIRE_CONSTANT(arg)
#endif
#else
#define SIMDE_REQUIRE_CONSTANT(arg)
#endif
#define SIMDE_REQUIRE_RANGE(arg, min, max) \
HEDLEY_REQUIRE_MSG((((arg) >= (min)) && ((arg) <= (max))), "'" #arg "' must be in [" #min ", " #max "]")
#define SIMDE_REQUIRE_CONSTANT_RANGE(arg, min, max) \
SIMDE_REQUIRE_CONSTANT(arg) \
SIMDE_REQUIRE_RANGE(arg, min, max)
/* A copy of HEDLEY_STATIC_ASSERT, except we don't define an empty
* fallback if we can't find an implementation; instead we have to
* check if SIMDE_STATIC_ASSERT is defined before using it. */
#if \
!defined(__cplusplus) && ( \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
HEDLEY_HAS_FEATURE(c_static_assert) || \
HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
defined(_Static_assert) \
)
# define SIMDE_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
#elif \
(defined(__cplusplus) && (__cplusplus >= 201103L)) || \
HEDLEY_MSVC_VERSION_CHECK(16,0,0)
# define SIMDE_STATIC_ASSERT(expr, message) HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message))
#endif
/* Statement exprs */
#if \
HEDLEY_GNUC_VERSION_CHECK(2,95,0) || \
HEDLEY_TINYC_VERSION_CHECK(0,9,26) || \
HEDLEY_INTEL_VERSION_CHECK(9,0,0) || \
HEDLEY_PGI_VERSION_CHECK(18,10,0) || \
HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) || \
HEDLEY_IBM_VERSION_CHECK(11,1,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define SIMDE_STATEMENT_EXPR_(expr) (__extension__ expr)
#endif
/* This is just a convenience macro to make it easy to call a single
* function with a specific diagnostic disabled. */
#if defined(SIMDE_STATEMENT_EXPR_)
#define SIMDE_DISABLE_DIAGNOSTIC_EXPR_(diagnostic, expr) \
SIMDE_STATEMENT_EXPR_(({ \
HEDLEY_DIAGNOSTIC_PUSH \
diagnostic \
(expr); \
HEDLEY_DIAGNOSTIC_POP \
}))
#endif
#if defined(SIMDE_CHECK_CONSTANT_) && defined(SIMDE_STATIC_ASSERT)
#define SIMDE_ASSERT_CONSTANT_(v) SIMDE_STATIC_ASSERT(SIMDE_CHECK_CONSTANT_(v), #v " must be constant.")
#endif
#if \
(HEDLEY_HAS_ATTRIBUTE(may_alias) && !defined(HEDLEY_SUNPRO_VERSION)) || \
HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0)
# define SIMDE_MAY_ALIAS __attribute__((__may_alias__))
#else
# define SIMDE_MAY_ALIAS
#endif
/* Lots of compilers support GCC-style vector extensions, but many
don't support all the features. Define different macros depending
on support for
* SIMDE_VECTOR - Declaring a vector.
* SIMDE_VECTOR_OPS - basic operations (binary and unary).
* SIMDE_VECTOR_NEGATE - negating a vector
* SIMDE_VECTOR_SCALAR - For binary operators, the second argument
can be a scalar, in which case the result is as if that scalar
had been broadcast to all lanes of a vector.
* SIMDE_VECTOR_SUBSCRIPT - Supports array subscript notation for
    extracting/inserting a single element.
SIMDE_VECTOR can be assumed if any others are defined, the
others are independent. */
#if !defined(SIMDE_NO_VECTOR)
# if \
HEDLEY_GCC_VERSION_CHECK(4,8,0)
# define SIMDE_VECTOR(size) __attribute__((__vector_size__(size)))
# define SIMDE_VECTOR_OPS
# define SIMDE_VECTOR_NEGATE
# define SIMDE_VECTOR_SCALAR
# define SIMDE_VECTOR_SUBSCRIPT
# elif HEDLEY_INTEL_VERSION_CHECK(16,0,0)
# define SIMDE_VECTOR(size) __attribute__((__vector_size__(size)))
# define SIMDE_VECTOR_OPS
# define SIMDE_VECTOR_NEGATE
/* ICC only supports SIMDE_VECTOR_SCALAR for constants */
# define SIMDE_VECTOR_SUBSCRIPT
# elif \
HEDLEY_GCC_VERSION_CHECK(4,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
# define SIMDE_VECTOR(size) __attribute__((__vector_size__(size)))
# define SIMDE_VECTOR_OPS
# elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0)
# define SIMDE_VECTOR(size) __attribute__((__vector_size__(size)))
# elif HEDLEY_HAS_ATTRIBUTE(vector_size)
# define SIMDE_VECTOR(size) __attribute__((__vector_size__(size)))
# define SIMDE_VECTOR_OPS
# define SIMDE_VECTOR_NEGATE
# define SIMDE_VECTOR_SUBSCRIPT
# if SIMDE_DETECT_CLANG_VERSION_CHECK(5,0,0)
# define SIMDE_VECTOR_SCALAR
# endif
# endif
/* GCC and clang have built-in functions to handle shuffling and
converting of vectors, but the implementations are slightly
different. This macro is just an abstraction over them. Note that
elem_size is in bits but vec_size is in bytes. */
# if !defined(SIMDE_NO_SHUFFLE_VECTOR) && defined(SIMDE_VECTOR_SUBSCRIPT)
HEDLEY_DIAGNOSTIC_PUSH
/* We don't care about -Wvariadic-macros; all compilers that support
* shufflevector/shuffle support them. */
# if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
# pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
# endif
# if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0)
# pragma GCC diagnostic ignored "-Wvariadic-macros"
# endif
# if HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
# define SIMDE_SHUFFLE_VECTOR_(elem_size, vec_size, a, b, ...) __builtin_shufflevector(a, b, __VA_ARGS__)
# elif HEDLEY_GCC_HAS_BUILTIN(__builtin_shuffle,4,7,0) && !defined(__INTEL_COMPILER)
# define SIMDE_SHUFFLE_VECTOR_(elem_size, vec_size, a, b, ...) (__extension__ ({ \
int##elem_size##_t SIMDE_VECTOR(vec_size) simde_shuffle_ = { __VA_ARGS__ }; \
__builtin_shuffle(a, b, simde_shuffle_); \
}))
# endif
HEDLEY_DIAGNOSTIC_POP
# endif
/* TODO: this actually works on XL C/C++ without SIMDE_VECTOR_SUBSCRIPT
but the code needs to be refactored a bit to take advantage. */
# if !defined(SIMDE_NO_CONVERT_VECTOR) && defined(SIMDE_VECTOR_SUBSCRIPT)
# if HEDLEY_HAS_BUILTIN(__builtin_convertvector) || HEDLEY_GCC_VERSION_CHECK(9,0,0)
# if HEDLEY_GCC_VERSION_CHECK(9,0,0) && !HEDLEY_GCC_VERSION_CHECK(9,3,0)
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93557 */
# define SIMDE_CONVERT_VECTOR_(to, from) ((to) = (__extension__({ \
__typeof__(from) from_ = (from); \
((void) from_); \
__builtin_convertvector(from_, __typeof__(to)); \
})))
# else
# define SIMDE_CONVERT_VECTOR_(to, from) ((to) = __builtin_convertvector((from), __typeof__(to)))
# endif
# endif
# endif
#endif
/* Since we currently require SUBSCRIPT before using a vector in a
union, we define these as dependencies of SUBSCRIPT. They are
likely to disappear in the future, once SIMDe learns how to make
use of vectors without using the union members. Do not use them
in your code unless you're okay with it breaking when SIMDe
changes. */
#if defined(SIMDE_VECTOR_SUBSCRIPT)
# if defined(SIMDE_VECTOR_OPS)
# define SIMDE_VECTOR_SUBSCRIPT_OPS
# endif
# if defined(SIMDE_VECTOR_SCALAR)
# define SIMDE_VECTOR_SUBSCRIPT_SCALAR
# endif
#endif
#if !defined(SIMDE_DISABLE_OPENMP)
#if !defined(SIMDE_ENABLE_OPENMP) && ((defined(_OPENMP) && (_OPENMP >= 201307L)) || (defined(_OPENMP_SIMD) && (_OPENMP_SIMD >= 201307L))) || defined(HEDLEY_MCST_LCC_VERSION)
#define SIMDE_ENABLE_OPENMP
#endif
#endif
#if !defined(SIMDE_ENABLE_CILKPLUS) && (defined(__cilk) || defined(HEDLEY_INTEL_VERSION))
# define SIMDE_ENABLE_CILKPLUS
#endif
#if defined(SIMDE_ENABLE_OPENMP)
# define SIMDE_VECTORIZE HEDLEY_PRAGMA(omp simd)
# define SIMDE_VECTORIZE_SAFELEN(l) HEDLEY_PRAGMA(omp simd safelen(l))
# if defined(__clang__)
# define SIMDE_VECTORIZE_REDUCTION(r) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wsign-conversion\"") \
HEDLEY_PRAGMA(omp simd reduction(r)) \
HEDLEY_DIAGNOSTIC_POP
# else
# define SIMDE_VECTORIZE_REDUCTION(r) HEDLEY_PRAGMA(omp simd reduction(r))
# endif
# if !defined(HEDLEY_MCST_LCC_VERSION)
# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(omp simd aligned(a))
# else
# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(omp simd)
# endif
#elif defined(SIMDE_ENABLE_CILKPLUS)
# define SIMDE_VECTORIZE HEDLEY_PRAGMA(simd)
# define SIMDE_VECTORIZE_SAFELEN(l) HEDLEY_PRAGMA(simd vectorlength(l))
# define SIMDE_VECTORIZE_REDUCTION(r) HEDLEY_PRAGMA(simd reduction(r))
# define SIMDE_VECTORIZE_ALIGNED(a) HEDLEY_PRAGMA(simd aligned(a))
#elif defined(__clang__) && !defined(HEDLEY_IBM_VERSION)
# define SIMDE_VECTORIZE HEDLEY_PRAGMA(clang loop vectorize(enable))
# define SIMDE_VECTORIZE_SAFELEN(l) HEDLEY_PRAGMA(clang loop vectorize_width(l))
# define SIMDE_VECTORIZE_REDUCTION(r) SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_ALIGNED(a)
#elif HEDLEY_GCC_VERSION_CHECK(4,9,0)
# define SIMDE_VECTORIZE HEDLEY_PRAGMA(GCC ivdep)
# define SIMDE_VECTORIZE_SAFELEN(l) SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_REDUCTION(r) SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_ALIGNED(a)
#elif HEDLEY_CRAY_VERSION_CHECK(5,0,0)
# define SIMDE_VECTORIZE HEDLEY_PRAGMA(_CRI ivdep)
# define SIMDE_VECTORIZE_SAFELEN(l) SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_REDUCTION(r) SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_ALIGNED(a)
#else
# define SIMDE_VECTORIZE
# define SIMDE_VECTORIZE_SAFELEN(l)
# define SIMDE_VECTORIZE_REDUCTION(r)
# define SIMDE_VECTORIZE_ALIGNED(a)
#endif
#define SIMDE_MASK_NZ_(v, mask) (((v) & (mask)) | !((v) & (mask)))
/* Intended for checking coverage, you should never use this in
production. */
#if defined(SIMDE_NO_INLINE)
# define SIMDE_FUNCTION_ATTRIBUTES HEDLEY_NEVER_INLINE static
#else
# define SIMDE_FUNCTION_ATTRIBUTES HEDLEY_ALWAYS_INLINE static
#endif
#if defined(SIMDE_NO_INLINE)
# define SIMDE_HUGE_FUNCTION_ATTRIBUTES HEDLEY_NEVER_INLINE static
#elif defined(SIMDE_CONSTRAINED_COMPILATION)
# define SIMDE_HUGE_FUNCTION_ATTRIBUTES static
#else
# define SIMDE_HUGE_FUNCTION_ATTRIBUTES HEDLEY_ALWAYS_INLINE static
#endif
#if \
HEDLEY_HAS_ATTRIBUTE(unused) || \
HEDLEY_GCC_VERSION_CHECK(2,95,0)
# define SIMDE_FUNCTION_POSSIBLY_UNUSED_ __attribute__((__unused__))
#else
# define SIMDE_FUNCTION_POSSIBLY_UNUSED_
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#if defined(_MSC_VER)
# define SIMDE_BEGIN_DECLS_ HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(disable:4996 4204)) HEDLEY_BEGIN_C_DECLS
# define SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP HEDLEY_END_C_DECLS
#else
# define SIMDE_BEGIN_DECLS_ \
HEDLEY_DIAGNOSTIC_PUSH \
SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
HEDLEY_BEGIN_C_DECLS
# define SIMDE_END_DECLS_ \
HEDLEY_END_C_DECLS \
HEDLEY_DIAGNOSTIC_POP
#endif
#if defined(__SIZEOF_INT128__)
# define SIMDE_HAVE_INT128_
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_
typedef __int128 simde_int128;
typedef unsigned __int128 simde_uint128;
HEDLEY_DIAGNOSTIC_POP
#endif
#if !defined(SIMDE_ENDIAN_LITTLE)
# define SIMDE_ENDIAN_LITTLE 1234
#endif
#if !defined(SIMDE_ENDIAN_BIG)
# define SIMDE_ENDIAN_BIG 4321
#endif
#if !defined(SIMDE_ENDIAN_ORDER)
/* GCC (and compilers masquerading as GCC) define __BYTE_ORDER__. */
# if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
/* TI defines _BIG_ENDIAN or _LITTLE_ENDIAN */
# elif defined(_BIG_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
# elif defined(_LITTLE_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
/* We know the endianness of some common architectures. Common
* architectures not listed (ARM, POWER, MIPS, etc.) here are
* bi-endian. */
# elif defined(__amd64) || defined(_M_X64) || defined(__i386) || defined(_M_IX86)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(__s390x__) || defined(__zarch__)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
/* Looks like we'll have to rely on the platform. If we're missing a
* platform, please let us know. */
# elif defined(_WIN32)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(sun) || defined(__sun) /* Solaris */
# include <sys/byteorder.h>
# if defined(_LITTLE_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(_BIG_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
# endif
# elif defined(__APPLE__)
# include <libkern/OSByteOrder.h>
# if defined(__LITTLE_ENDIAN__)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(__BIG_ENDIAN__)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
# endif
# elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__) || defined(BSD)
# include <machine/endian.h>
# if defined(__BYTE_ORDER) && (__BYTE_ORDER == __LITTLE_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
# endif
# elif defined(__linux__) || defined(__linux) || defined(__gnu_linux__)
# include <endian.h>
# if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && (__BYTE_ORDER == __LITTLE_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_LITTLE
# elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN)
# define SIMDE_ENDIAN_ORDER SIMDE_ENDIAN_BIG
# endif
# endif
#endif
#if \
HEDLEY_HAS_BUILTIN(__builtin_bswap64) || \
HEDLEY_GCC_VERSION_CHECK(4,3,0) || \
HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define simde_bswap64(v) __builtin_bswap64(v)
#elif HEDLEY_MSVC_VERSION_CHECK(13,10,0)
#define simde_bswap64(v) _byteswap_uint64(v)
#else
SIMDE_FUNCTION_ATTRIBUTES
uint64_t
simde_bswap64(uint64_t v) {
  /* Portable fallback byte swap: peel bytes off the low end of `v` and
   * push them onto the low end of `r`, which reverses the byte order
   * (v's least-significant byte ends up most significant in r). */
  uint64_t r = 0;
  for (int i = 0 ; i < 8 ; i++) {
    r = (r << 8) | (v & HEDLEY_STATIC_CAST(uint64_t, 0xff));
    v >>= 8;
  }
  return r;
}
#endif
#if !defined(SIMDE_ENDIAN_ORDER)
# error Unknown byte order; please file a bug
#else
# if SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE
# define simde_endian_bswap64_be(value) simde_bswap64(value)
# define simde_endian_bswap64_le(value) (value)
# elif SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG
# define simde_endian_bswap64_be(value) (value)
# define simde_endian_bswap64_le(value) simde_bswap64(value)
# endif
#endif
/* TODO: we should at least make an attempt to detect the correct
types for simde_float32/float64 instead of just assuming float and
double. */
#if !defined(SIMDE_FLOAT32_TYPE)
# define SIMDE_FLOAT32_TYPE float
# define SIMDE_FLOAT32_C(value) value##f
#else
# define SIMDE_FLOAT32_C(value) ((SIMDE_FLOAT32_TYPE) value)
#endif
typedef SIMDE_FLOAT32_TYPE simde_float32;
#if !defined(SIMDE_FLOAT64_TYPE)
# define SIMDE_FLOAT64_TYPE double
# define SIMDE_FLOAT64_C(value) value
#else
# define SIMDE_FLOAT64_C(value) ((SIMDE_FLOAT64_TYPE) value)
#endif
typedef SIMDE_FLOAT64_TYPE simde_float64;
#if defined(__cplusplus)
typedef bool simde_bool;
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
typedef _Bool simde_bool;
#elif defined(bool)
typedef bool simde_bool;
#else
#include <stdbool.h>
typedef bool simde_bool;
#endif
#if HEDLEY_HAS_WARNING("-Wbad-function-cast")
# define SIMDE_CONVERT_FTOI(T,v) \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wbad-function-cast\"") \
HEDLEY_STATIC_CAST(T, (v)) \
HEDLEY_DIAGNOSTIC_POP
#else
# define SIMDE_CONVERT_FTOI(T,v) ((T) (v))
#endif
/* TODO: detect compilers which support this outside of C11 mode */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#define SIMDE_CHECKED_REINTERPRET_CAST(to, from, value) _Generic((value), to: (value), default: (_Generic((value), from: ((to) (value)))))
#define SIMDE_CHECKED_STATIC_CAST(to, from, value) _Generic((value), to: (value), default: (_Generic((value), from: ((to) (value)))))
#else
#define SIMDE_CHECKED_REINTERPRET_CAST(to, from, value) HEDLEY_REINTERPRET_CAST(to, value)
#define SIMDE_CHECKED_STATIC_CAST(to, from, value) HEDLEY_STATIC_CAST(to, value)
#endif
#if HEDLEY_HAS_WARNING("-Wfloat-equal")
# define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL _Pragma("clang diagnostic ignored \"-Wfloat-equal\"")
#elif HEDLEY_GCC_VERSION_CHECK(3,0,0)
# define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
# define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL
#endif
/* Some functions can trade accuracy for speed. For those functions
you can control the trade-off using this macro. Possible values:
0: prefer speed
1: reasonable trade-offs
2: prefer accuracy */
#if !defined(SIMDE_ACCURACY_PREFERENCE)
# define SIMDE_ACCURACY_PREFERENCE 1
#endif
#if defined(__STDC_HOSTED__)
# define SIMDE_STDC_HOSTED __STDC_HOSTED__
#else
# if \
defined(HEDLEY_PGI_VERSION) || \
defined(HEDLEY_MSVC_VERSION)
# define SIMDE_STDC_HOSTED 1
# else
# define SIMDE_STDC_HOSTED 0
# endif
#endif
/* Try to deal with environments without a standard library. */
#if !defined(simde_memcpy)
#if HEDLEY_HAS_BUILTIN(__builtin_memcpy)
#define simde_memcpy(dest, src, n) __builtin_memcpy(dest, src, n)
#endif
#endif
#if !defined(simde_memset)
#if HEDLEY_HAS_BUILTIN(__builtin_memset)
#define simde_memset(s, c, n) __builtin_memset(s, c, n)
#endif
#endif
#if !defined(simde_memcmp)
#if HEDLEY_HAS_BUILTIN(__builtin_memcmp)
#define simde_memcmp(s1, s2, n) __builtin_memcmp(s1, s2, n)
#endif
#endif
#if !defined(simde_memcpy) || !defined(simde_memset) || !defined(simde_memcmp)
#if !defined(SIMDE_NO_STRING_H)
#if defined(__has_include)
#if !__has_include(<string.h>)
#define SIMDE_NO_STRING_H
#endif
#elif (SIMDE_STDC_HOSTED == 0)
#define SIMDE_NO_STRING_H
#endif
#endif
#if !defined(SIMDE_NO_STRING_H)
#include <string.h>
#if !defined(simde_memcpy)
#define simde_memcpy(dest, src, n) memcpy(dest, src, n)
#endif
#if !defined(simde_memset)
#define simde_memset(s, c, n) memset(s, c, n)
#endif
#if !defined(simde_memcmp)
#define simde_memcmp(s1, s2, n) memcmp(s1, s2, n)
#endif
#else
/* These are meant to be portable, not fast. If you're hitting them you
* should think about providing your own (by defining the simde_memcpy
* macro prior to including any SIMDe files) or submitting a patch to
* SIMDe so we can detect your system-provided memcpy/memset, like by
* adding your compiler to the checks for __builtin_memcpy and/or
* __builtin_memset. */
#if !defined(simde_memcpy)
SIMDE_FUNCTION_ATTRIBUTES
void
simde_memcpy_(void* dest, const void* src, size_t len) {
  /* Portable fallback memcpy for environments without <string.h>: a
   * simple byte-by-byte forward copy.  As with standard memcpy, the
   * regions must not overlap. */
  char* dest_ = HEDLEY_STATIC_CAST(char*, dest);
  /* Fix: `src` is const-qualified, so the local alias must be
   * `const char*`; the cast result previously initialized a plain
   * `char*`, discarding the qualifier (ill-formed in C++). */
  const char* src_ = HEDLEY_STATIC_CAST(const char*, src);
  for (size_t i = 0 ; i < len ; i++) {
    dest_[i] = src_[i];
  }
}
#define simde_memcpy(dest, src, n) simde_memcpy_(dest, src, n)
#endif
#if !defined(simde_memset)
SIMDE_FUNCTION_ATTRIBUTES
void
simde_memset_(void* s, int c, size_t len) {
  /* Portable fallback memset: store the low byte of `c` into each of the
   * first `len` bytes of `s`. */
  char* s_ = HEDLEY_STATIC_CAST(char*, s);
  char c_ = HEDLEY_STATIC_CAST(char, c);
  for (size_t i = 0 ; i < len ; i++) {
    /* Fix: `c_` is a scalar char, not an array; `c_[i]` did not compile. */
    s_[i] = c_;
  }
}
#define simde_memset(s, c, n) simde_memset_(s, c, n)
#endif
#if !defined(simde_memcmp)
SIMDE_FUNCTION_ATTRIBUTES
int
simde_memcmp_(const void *s1, const void *s2, size_t n) {
  /* Portable fallback memcmp: compare the first `n` bytes as unsigned
   * chars, returning <0, 0, or >0 like the standard function.
   * Fixes: the attribute macro was misspelled (SIMDE_FUCTION_...), the
   * casts discarded const from the const void* parameters, and the loop
   * bound referenced an undeclared `len` instead of the parameter `n`. */
  const unsigned char* s1_ = HEDLEY_STATIC_CAST(const unsigned char*, s1);
  const unsigned char* s2_ = HEDLEY_STATIC_CAST(const unsigned char*, s2);
  for (size_t i = 0 ; i < n ; i++) {
    if (s1_[i] != s2_[i]) {
      return (int) (s1_[i] - s2_[i]);
    }
  }
  return 0;
}
#define simde_memcmp(s1, s2, n) simde_memcmp_(s1, s2, n)
#endif
#endif
#endif
#if defined(FE_ALL_EXCEPT)
#define SIMDE_HAVE_FENV_H
#elif defined(__has_include)
#if __has_include(<fenv.h>)
#include <fenv.h>
#define SIMDE_HAVE_FENV_H
#endif
#elif SIMDE_STDC_HOSTED == 1
#include <fenv.h>
#define SIMDE_HAVE_FENV_H
#endif
#if defined(EXIT_FAILURE)
#define SIMDE_HAVE_STDLIB_H
#elif defined(__has_include)
#if __has_include(<stdlib.h>)
#include <stdlib.h>
#define SIMDE_HAVE_STDLIB_H
#endif
#elif SIMDE_STDC_HOSTED == 1
#include <stdlib.h>
#define SIMDE_HAVE_STDLIB_H
#endif
#if defined(__has_include)
# if defined(__cplusplus) && (__cplusplus >= 201103L) && __has_include(<cfenv>)
# include <cfenv>
# elif __has_include(<fenv.h>)
# include <fenv.h>
# endif
# if __has_include(<stdlib.h>)
# include <stdlib.h>
# endif
#elif SIMDE_STDC_HOSTED == 1
# include <stdlib.h>
# include <fenv.h>
#endif
/* Generates a bit-reinterpretation helper `Name` converting T_From to
 * T_To via simde_memcpy (the portable, strict-aliasing-safe way to
 * type-pun).  Both types must have the same size. */
#define SIMDE_DEFINE_CONVERSION_FUNCTION_(Name, T_To, T_From) \
static HEDLEY_ALWAYS_INLINE HEDLEY_CONST SIMDE_FUNCTION_POSSIBLY_UNUSED_ \
T_To \
Name (T_From value) { \
T_To r; \
simde_memcpy(&r, &value, sizeof(r)); \
return r; \
}
/* Bit-cast helpers between IEEE-754 floats and same-width unsigned ints. */
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float32_as_uint32, uint32_t, simde_float32)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint32_as_float32, simde_float32, uint32_t)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_float64_as_uint64, uint64_t, simde_float64)
SIMDE_DEFINE_CONVERSION_FUNCTION_(simde_uint64_as_float64, simde_float64, uint64_t)
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin check.h :: */
/* Check (assertions)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*
* SPDX-License-Identifier: CC0-1.0
*/
#if !defined(SIMDE_CHECK_H)
#define SIMDE_CHECK_H
#if !defined(SIMDE_NDEBUG) && !defined(SIMDE_DEBUG)
# define SIMDE_NDEBUG 1
#endif
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
#include <stdint.h>
#if !defined(_WIN32)
# define SIMDE_SIZE_MODIFIER "z"
# define SIMDE_CHAR_MODIFIER "hh"
# define SIMDE_SHORT_MODIFIER "h"
#else
# if defined(_M_X64) || defined(__amd64__)
# define SIMDE_SIZE_MODIFIER "I64"
# else
# define SIMDE_SIZE_MODIFIER ""
# endif
# define SIMDE_CHAR_MODIFIER ""
# define SIMDE_SHORT_MODIFIER ""
#endif
#if defined(_MSC_VER) && (_MSC_VER >= 1500)
# define SIMDE_PUSH_DISABLE_MSVC_C4127_ __pragma(warning(push)) __pragma(warning(disable:4127))
# define SIMDE_POP_DISABLE_MSVC_C4127_ __pragma(warning(pop))
#else
# define SIMDE_PUSH_DISABLE_MSVC_C4127_
# define SIMDE_POP_DISABLE_MSVC_C4127_
#endif
#if !defined(simde_errorf)
/* Pull in <stdio.h> for fprintf/stderr (used by simde_errorf) when we
 * can prove it exists or the implementation is hosted. */
# if defined(__has_include)
#  if __has_include(<stdio.h>)
#   include <stdio.h>
#  endif
# elif defined(SIMDE_STDC_HOSTED)
#  if SIMDE_STDC_HOSTED == 1
#   include <stdio.h>
#  endif
# elif defined(__STDC_HOSTED__)
/* Fixed typo: was __STDC_HOSTETD__, so this branch never included stdio. */
#  if __STDC_HOSTED__ == 1
#   include <stdio.h>
#  endif
# endif
/* AUTOMATICALLY GENERATED FILE, DO NOT MODIFY */
/* 3f186a0f35bb73f01ffda73fa9bf060a444bb46b */
/* :: Begin debug-trap.h :: */
/* Debugging assertions and traps
 * Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*
* SPDX-License-Identifier: CC0-1.0
*/
#if !defined(SIMDE_DEBUG_TRAP_H)
#define SIMDE_DEBUG_TRAP_H
#if !defined(SIMDE_NDEBUG) && defined(NDEBUG) && !defined(SIMDE_DEBUG)
# define SIMDE_NDEBUG 1
#endif
#if defined(__has_builtin) && !defined(__ibmxl__)
# if __has_builtin(__builtin_debugtrap)
# define simde_trap() __builtin_debugtrap()
# elif __has_builtin(__debugbreak)
# define simde_trap() __debugbreak()
# endif
#endif
#if !defined(simde_trap)
# if defined(_MSC_VER) || defined(__INTEL_COMPILER)
# define simde_trap() __debugbreak()
# elif defined(__ARMCC_VERSION)
# define simde_trap() __breakpoint(42)
# elif defined(__ibmxl__) || defined(__xlC__)
# include <builtins.h>
# define simde_trap() __trap(42)
# elif defined(__DMC__) && defined(_M_IX86)
static inline void simde_trap(void) { __asm int 3h; }
# elif defined(__i386__) || defined(__x86_64__)
static inline void simde_trap(void) { __asm__ __volatile__("int $03"); }
# elif defined(__thumb__)
static inline void simde_trap(void) { __asm__ __volatile__(".inst 0xde01"); }
# elif defined(__aarch64__)
static inline void simde_trap(void) { __asm__ __volatile__(".inst 0xd4200000"); }
# elif defined(__arm__)
static inline void simde_trap(void) { __asm__ __volatile__(".inst 0xe7f001f0"); }
# elif defined (__alpha__) && !defined(__osf__)
static inline void simde_trap(void) { __asm__ __volatile__("bpt"); }
# elif defined(_54_)
static inline void simde_trap(void) { __asm__ __volatile__("ESTOP"); }
# elif defined(_55_)
static inline void simde_trap(void) { __asm__ __volatile__(";\n .if (.MNEMONIC)\n ESTOP_1\n .else\n ESTOP_1()\n .endif\n NOP"); }
# elif defined(_64P_)
static inline void simde_trap(void) { __asm__ __volatile__("SWBP 0"); }
# elif defined(_6x_)
static inline void simde_trap(void) { __asm__ __volatile__("NOP\n .word 0x10000000"); }
# elif defined(__STDC_HOSTED__) && (__STDC_HOSTED__ == 0) && defined(__GNUC__)
# define simde_trap() __builtin_trap()
# else
# include <signal.h>
# if defined(SIGTRAP)
# define simde_trap() raise(SIGTRAP)
# else
# define simde_trap() raise(SIGABRT)
# endif
# endif
#endif
#if defined(HEDLEY_LIKELY)
# define SIMDE_DBG_LIKELY(expr) HEDLEY_LIKELY(expr)
#elif defined(__GNUC__) && (__GNUC__ >= 3)
# define SIMDE_DBG_LIKELY(expr) __builtin_expect(!!(expr), 1)
#else
# define SIMDE_DBG_LIKELY(expr) (!!(expr))
#endif
#if !defined(SIMDE_NDEBUG) || (SIMDE_NDEBUG == 0)
# define simde_dbg_assert(expr) do { \
if (!SIMDE_DBG_LIKELY(expr)) { \
simde_trap(); \
} \
} while (0)
#else
# define simde_dbg_assert(expr)
#endif
#endif /* !defined(SIMDE_DEBUG_TRAP_H) */
/* :: End debug-trap.h :: */
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_
# if defined(EOF)
# define simde_errorf(format, ...) (fprintf(stderr, format, __VA_ARGS__), abort())
# else
# define simde_errorf(format, ...) (simde_trap())
# endif
HEDLEY_DIAGNOSTIC_POP
#endif
#define simde_error(msg) simde_errorf("%s", msg)
#if defined(SIMDE_NDEBUG) || \
(defined(__cplusplus) && (__cplusplus < 201103L)) || \
(defined(__STDC__) && (__STDC__ < 199901L))
# if defined(SIMDE_CHECK_FAIL_DEFINED)
# define simde_assert(expr)
# else
# if defined(HEDLEY_ASSUME)
# define simde_assert(expr) HEDLEY_ASSUME(expr)
# elif HEDLEY_GCC_VERSION_CHECK(4,5,0)
# define simde_assert(expr) ((void) (!!(expr) ? 1 : (__builtin_unreachable(), 1)))
# elif HEDLEY_MSVC_VERSION_CHECK(13,10,0)
# define simde_assert(expr) __assume(expr)
# else
# define simde_assert(expr)
# endif
# endif
# define simde_assert_true(expr) simde_assert(expr)
# define simde_assert_false(expr) simde_assert(!(expr))
# define simde_assert_type_full(prefix, suffix, T, fmt, a, op, b) simde_assert(((a) op (b)))
# define simde_assert_double_equal(a, b, precision)
# define simde_assert_string_equal(a, b)
# define simde_assert_string_not_equal(a, b)
# define simde_assert_memory_equal(size, a, b)
# define simde_assert_memory_not_equal(size, a, b)
#else
# define simde_assert(expr) \
do { \
if (!HEDLEY_LIKELY(expr)) { \
simde_error("assertion failed: " #expr "\n"); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_true(expr) \
do { \
if (!HEDLEY_LIKELY(expr)) { \
simde_error("assertion failed: " #expr " is not true\n"); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_false(expr) \
do { \
if (!HEDLEY_LIKELY(!(expr))) { \
simde_error("assertion failed: " #expr " is not false\n"); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_type_full(prefix, suffix, T, fmt, a, op, b) \
do { \
T simde_tmp_a_ = (a); \
T simde_tmp_b_ = (b); \
if (!(simde_tmp_a_ op simde_tmp_b_)) { \
simde_errorf("assertion failed: %s %s %s (" prefix "%" fmt suffix " %s " prefix "%" fmt suffix ")\n", \
#a, #op, #b, simde_tmp_a_, #op, simde_tmp_b_); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_double_equal(a, b, precision) \
do { \
const double simde_tmp_a_ = (a); \
const double simde_tmp_b_ = (b); \
const double simde_tmp_diff_ = ((simde_tmp_a_ - simde_tmp_b_) < 0) ? \
-(simde_tmp_a_ - simde_tmp_b_) : \
(simde_tmp_a_ - simde_tmp_b_); \
if (HEDLEY_UNLIKELY(simde_tmp_diff_ > 1e-##precision)) { \
simde_errorf("assertion failed: %s == %s (%0." #precision "g == %0." #precision "g)\n", \
#a, #b, simde_tmp_a_, simde_tmp_b_); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# include <string.h>
# define simde_assert_string_equal(a, b) \
do { \
const char* simde_tmp_a_ = a; \
const char* simde_tmp_b_ = b; \
if (HEDLEY_UNLIKELY(strcmp(simde_tmp_a_, simde_tmp_b_) != 0)) { \
simde_errorf("assertion failed: string %s == %s (\"%s\" == \"%s\")\n", \
#a, #b, simde_tmp_a_, simde_tmp_b_); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_string_not_equal(a, b) \
do { \
const char* simde_tmp_a_ = a; \
const char* simde_tmp_b_ = b; \
if (HEDLEY_UNLIKELY(strcmp(simde_tmp_a_, simde_tmp_b_) == 0)) { \
simde_errorf("assertion failed: string %s != %s (\"%s\" == \"%s\")\n", \
#a, #b, simde_tmp_a_, simde_tmp_b_); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_memory_equal(size, a, b) \
do { \
const unsigned char* simde_tmp_a_ = (const unsigned char*) (a); \
const unsigned char* simde_tmp_b_ = (const unsigned char*) (b); \
const size_t simde_tmp_size_ = (size); \
if (HEDLEY_UNLIKELY(memcmp(simde_tmp_a_, simde_tmp_b_, simde_tmp_size_)) != 0) { \
size_t simde_tmp_pos_; \
for (simde_tmp_pos_ = 0 ; simde_tmp_pos_ < simde_tmp_size_ ; simde_tmp_pos_++) { \
if (simde_tmp_a_[simde_tmp_pos_] != simde_tmp_b_[simde_tmp_pos_]) { \
simde_errorf("assertion failed: memory %s == %s, at offset %" SIMDE_SIZE_MODIFIER "u\n", \
#a, #b, simde_tmp_pos_); \
break; \
} \
} \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
# define simde_assert_memory_not_equal(size, a, b) \
do { \
const unsigned char* simde_tmp_a_ = (const unsigned char*) (a); \
const unsigned char* simde_tmp_b_ = (const unsigned char*) (b); \
const size_t simde_tmp_size_ = (size); \
if (HEDLEY_UNLIKELY(memcmp(simde_tmp_a_, simde_tmp_b_, simde_tmp_size_)) == 0) { \
simde_errorf("assertion failed: memory %s != %s (%" SIMDE_SIZE_MODIFIER "u bytes)\n", \
#a, #b, simde_tmp_size_); \
} \
SIMDE_PUSH_DISABLE_MSVC_C4127_ \
} while (0) \
SIMDE_POP_DISABLE_MSVC_C4127_
#endif
#define simde_assert_type(T, fmt, a, op, b) \
simde_assert_type_full("", "", T, fmt, a, op, b)
#define simde_assert_char(a, op, b) \
simde_assert_type_full("'\\x", "'", char, "02" SIMDE_CHAR_MODIFIER "x", a, op, b)
#define simde_assert_uchar(a, op, b) \
simde_assert_type_full("'\\x", "'", unsigned char, "02" SIMDE_CHAR_MODIFIER "x", a, op, b)
#define simde_assert_short(a, op, b) \
simde_assert_type(short, SIMDE_SHORT_MODIFIER "d", a, op, b)
#define simde_assert_ushort(a, op, b) \
simde_assert_type(unsigned short, SIMDE_SHORT_MODIFIER "u", a, op, b)
#define simde_assert_int(a, op, b) \
simde_assert_type(int, "d", a, op, b)
#define simde_assert_uint(a, op, b) \
simde_assert_type(unsigned int, "u", a, op, b)
#define simde_assert_long(a, op, b) \
simde_assert_type(long int, "ld", a, op, b)
#define simde_assert_ulong(a, op, b) \
simde_assert_type(unsigned long int, "lu", a, op, b)
#define simde_assert_llong(a, op, b) \
simde_assert_type(long long int, "lld", a, op, b)
#define simde_assert_ullong(a, op, b) \
simde_assert_type(unsigned long long int, "llu", a, op, b)
#define simde_assert_size(a, op, b) \
simde_assert_type(size_t, SIMDE_SIZE_MODIFIER "u", a, op, b)
#define simde_assert_float(a, op, b) \
simde_assert_type(float, "f", a, op, b)
#define simde_assert_double(a, op, b) \
simde_assert_type(double, "g", a, op, b)
#define simde_assert_ptr(a, op, b) \
simde_assert_type(const void*, "p", a, op, b)
#define simde_assert_int8(a, op, b) \
simde_assert_type(int8_t, PRIi8, a, op, b)
#define simde_assert_uint8(a, op, b) \
simde_assert_type(uint8_t, PRIu8, a, op, b)
#define simde_assert_int16(a, op, b) \
simde_assert_type(int16_t, PRIi16, a, op, b)
#define simde_assert_uint16(a, op, b) \
simde_assert_type(uint16_t, PRIu16, a, op, b)
#define simde_assert_int32(a, op, b) \
simde_assert_type(int32_t, PRIi32, a, op, b)
#define simde_assert_uint32(a, op, b) \
simde_assert_type(uint32_t, PRIu32, a, op, b)
#define simde_assert_int64(a, op, b) \
simde_assert_type(int64_t, PRIi64, a, op, b)
#define simde_assert_uint64(a, op, b) \
simde_assert_type(uint64_t, PRIu64, a, op, b)
#define simde_assert_ptr_equal(a, b) \
simde_assert_ptr(a, ==, b)
#define simde_assert_ptr_not_equal(a, b) \
simde_assert_ptr(a, !=, b)
#define simde_assert_null(ptr) \
simde_assert_ptr(ptr, ==, NULL)
#define simde_assert_not_null(ptr) \
simde_assert_ptr(ptr, !=, NULL)
#define simde_assert_ptr_null(ptr) \
simde_assert_ptr(ptr, ==, NULL)
#define simde_assert_ptr_not_null(ptr) \
simde_assert_ptr(ptr, !=, NULL)
#endif /* !defined(SIMDE_CHECK_H) */
/* :: End check.h :: */
/* GCC/clang have a bunch of functionality in builtins which we would
* like to access, but the suffixes indicate whether the operate on
* int, long, or long long, not fixed width types (e.g., int32_t).
* we use these macros to attempt to map from fixed-width to the
* names GCC uses. Note that you should still cast the input(s) and
* return values (to/from SIMDE_BUILTIN_TYPE_*_) since often even if
* types are the same size they may not be compatible according to the
 * compiler. For example, on x86 long and long long are generally
* both 64 bits, but platforms vary on whether an int64_t is mapped
* to a long or long long. */
#include <limits.h>
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#if (INT8_MAX == INT_MAX) && (INT8_MIN == INT_MIN)
#define SIMDE_BUILTIN_SUFFIX_8_
#define SIMDE_BUILTIN_TYPE_8_ int
#elif (INT8_MAX == LONG_MAX) && (INT8_MIN == LONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_8_ l
#define SIMDE_BUILTIN_TYPE_8_ long
#elif (INT8_MAX == LLONG_MAX) && (INT8_MIN == LLONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_8_ ll
#define SIMDE_BUILTIN_TYPE_8_ long long
#endif
#if (INT16_MAX == INT_MAX) && (INT16_MIN == INT_MIN)
#define SIMDE_BUILTIN_SUFFIX_16_
#define SIMDE_BUILTIN_TYPE_16_ int
#elif (INT16_MAX == LONG_MAX) && (INT16_MIN == LONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_16_ l
#define SIMDE_BUILTIN_TYPE_16_ long
#elif (INT16_MAX == LLONG_MAX) && (INT16_MIN == LLONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_16_ ll
#define SIMDE_BUILTIN_TYPE_16_ long long
#endif
#if (INT32_MAX == INT_MAX) && (INT32_MIN == INT_MIN)
#define SIMDE_BUILTIN_SUFFIX_32_
#define SIMDE_BUILTIN_TYPE_32_ int
#elif (INT32_MAX == LONG_MAX) && (INT32_MIN == LONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_32_ l
#define SIMDE_BUILTIN_TYPE_32_ long
#elif (INT32_MAX == LLONG_MAX) && (INT32_MIN == LLONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_32_ ll
#define SIMDE_BUILTIN_TYPE_32_ long long
#endif
#if (INT64_MAX == INT_MAX) && (INT64_MIN == INT_MIN)
#define SIMDE_BUILTIN_SUFFIX_64_
#define SIMDE_BUILTIN_TYPE_64_ int
#elif (INT64_MAX == LONG_MAX) && (INT64_MIN == LONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_64_ l
#define SIMDE_BUILTIN_TYPE_64_ long
#elif (INT64_MAX == LLONG_MAX) && (INT64_MIN == LLONG_MIN)
#define SIMDE_BUILTIN_SUFFIX_64_ ll
#define SIMDE_BUILTIN_TYPE_64_ long long
#endif
#if defined(SIMDE_BUILTIN_SUFFIX_8_)
#define SIMDE_BUILTIN_8_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_8_)
#define SIMDE_BUILTIN_HAS_8_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_8_))
#else
#define SIMDE_BUILTIN_HAS_8_(name) 0
#endif
#if defined(SIMDE_BUILTIN_SUFFIX_16_)
#define SIMDE_BUILTIN_16_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_16_)
#define SIMDE_BUILTIN_HAS_16_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_16_))
#else
#define SIMDE_BUILTIN_HAS_16_(name) 0
#endif
#if defined(SIMDE_BUILTIN_SUFFIX_32_)
#define SIMDE_BUILTIN_32_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_32_)
#define SIMDE_BUILTIN_HAS_32_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_32_))
#else
#define SIMDE_BUILTIN_HAS_32_(name) 0
#endif
#if defined(SIMDE_BUILTIN_SUFFIX_64_)
#define SIMDE_BUILTIN_64_(name) HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_64_)
#define SIMDE_BUILTIN_HAS_64_(name) HEDLEY_HAS_BUILTIN(HEDLEY_CONCAT3(__builtin_, name, SIMDE_BUILTIN_SUFFIX_64_))
#else
#define SIMDE_BUILTIN_HAS_64_(name) 0
#endif
#if !defined(__cplusplus)
#if defined(__clang__)
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#define SIMDE_GENERIC_(...) (__extension__ ({ \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc11-extensions\"") \
_Generic(__VA_ARGS__); \
HEDLEY_DIAGNOSTIC_POP \
}))
#elif HEDLEY_HAS_WARNING("-Wc1x-extensions")
#define SIMDE_GENERIC_(...) (__extension__ ({ \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc1x-extensions\"") \
_Generic(__VA_ARGS__); \
HEDLEY_DIAGNOSTIC_POP \
}))
#endif
#elif \
defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) || \
HEDLEY_HAS_EXTENSION(c_generic_selections) || \
HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
HEDLEY_ARM_VERSION_CHECK(5,3,0)
#define SIMDE_GENERIC_(...) _Generic(__VA_ARGS__)
#endif
#endif
/* Sometimes we run into problems with specific versions of compilers
which make the native versions unusable for us. Often this is due
to missing functions, sometimes buggy implementations, etc. These
macros are how we check for specific bugs. As they are fixed we'll
start only defining them for problematic compiler versions. */
#if !defined(SIMDE_IGNORE_COMPILER_BUGS)
# if defined(HEDLEY_GCC_VERSION)
# if !HEDLEY_GCC_VERSION_CHECK(4,9,0)
# define SIMDE_BUG_GCC_REV_208793
# endif
# if !HEDLEY_GCC_VERSION_CHECK(5,0,0)
# define SIMDE_BUG_GCC_BAD_MM_SRA_EPI32 /* TODO: find relevant bug or commit */
# endif
# if !HEDLEY_GCC_VERSION_CHECK(6,0,0)
# define SIMDE_BUG_GCC_SIZEOF_IMMEDIATE
# endif
# if !HEDLEY_GCC_VERSION_CHECK(4,6,0)
# define SIMDE_BUG_GCC_BAD_MM_EXTRACT_EPI8 /* TODO: find relevant bug or commit */
# endif
# if !HEDLEY_GCC_VERSION_CHECK(8,0,0)
# define SIMDE_BUG_GCC_REV_247851
# endif
# if !HEDLEY_GCC_VERSION_CHECK(10,0,0)
# define SIMDE_BUG_GCC_REV_274313
# define SIMDE_BUG_GCC_91341
# define SIMDE_BUG_GCC_92035
# endif
# if !HEDLEY_GCC_VERSION_CHECK(9,0,0) && defined(SIMDE_ARCH_AARCH64)
# define SIMDE_BUG_GCC_ARM_SHIFT_SCALAR
# endif
# if !HEDLEY_GCC_VERSION_CHECK(9,0,0) && defined(SIMDE_ARCH_AARCH64)
# define SIMDE_BUG_GCC_BAD_VEXT_REV32
# endif
# if defined(SIMDE_ARCH_X86) && !defined(SIMDE_ARCH_AMD64)
# define SIMDE_BUG_GCC_94482
# endif
# if (defined(SIMDE_ARCH_X86) && !defined(SIMDE_ARCH_AMD64)) || defined(SIMDE_ARCH_ZARCH)
# define SIMDE_BUG_GCC_53784
# endif
# if defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64)
# if HEDLEY_GCC_VERSION_CHECK(4,3,0) /* -Wsign-conversion */
# define SIMDE_BUG_GCC_95144
# endif
# if !HEDLEY_GCC_VERSION_CHECK(11,0,0)
# define SIMDE_BUG_GCC_95483
# endif
# if defined(__OPTIMIZE__)
# define SIMDE_BUG_GCC_100927
# endif
# define SIMDE_BUG_GCC_98521
# endif
# if !HEDLEY_GCC_VERSION_CHECK(9,4,0) && defined(SIMDE_ARCH_AARCH64)
# define SIMDE_BUG_GCC_94488
# endif
# if !HEDLEY_GCC_VERSION_CHECK(9,1,0) && defined(SIMDE_ARCH_AARCH64)
# define SIMDE_BUG_GCC_REV_264019
# endif
# if defined(SIMDE_ARCH_ARM)
# define SIMDE_BUG_GCC_95399
# define SIMDE_BUG_GCC_95471
# elif defined(SIMDE_ARCH_POWER)
# define SIMDE_BUG_GCC_95227
# define SIMDE_BUG_GCC_95782
# define SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS
# elif defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64)
# if !HEDLEY_GCC_VERSION_CHECK(10,2,0) && !defined(__OPTIMIZE__)
# define SIMDE_BUG_GCC_96174
# endif
# elif defined(SIMDE_ARCH_ZARCH)
# define SIMDE_BUG_GCC_95782
# if HEDLEY_GCC_VERSION_CHECK(10,0,0)
# define SIMDE_BUG_GCC_101614
# endif
# endif
# if defined(SIMDE_ARCH_MIPS_MSA)
# define SIMDE_BUG_GCC_97248
# define SIMDE_BUG_GCC_100760
# define SIMDE_BUG_GCC_100761
# define SIMDE_BUG_GCC_100762
# endif
# define SIMDE_BUG_GCC_95399
# elif defined(__clang__)
# if defined(SIMDE_ARCH_AARCH64)
# define SIMDE_BUG_CLANG_45541
# define SIMDE_BUG_CLANG_46844
# define SIMDE_BUG_CLANG_48257
# if !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)
# define SIMDE_BUG_CLANG_46840
# endif
# if SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0)
# define SIMDE_BUG_CLANG_BAD_VI64_OPS
# endif
# if SIMDE_DETECT_CLANG_VERSION_NOT(9,0,0)
# define SIMDE_BUG_CLANG_GIT_4EC445B8
# define SIMDE_BUG_CLANG_REV_365298 /* 0464e07c8f6e3310c28eb210a4513bc2243c2a7e */
# endif
# endif
# if defined(SIMDE_ARCH_ARM)
# if !SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0)
# define SIMDE_BUG_CLANG_BAD_VGET_SET_LANE_TYPES
# endif
# endif
# if defined(SIMDE_ARCH_POWER) && !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)
# define SIMDE_BUG_CLANG_46770
# endif
# if defined(SIMDE_ARCH_POWER) && (SIMDE_ARCH_POWER == 700) && (SIMDE_DETECT_CLANG_VERSION_CHECK(11,0,0))
# define SIMDE_BUG_CLANG_50893
# define SIMDE_BUG_CLANG_50901
# endif
# if defined(_ARCH_PWR9) && !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0) && !defined(__OPTIMIZE__)
# define SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT
# endif
# if defined(SIMDE_ARCH_POWER)
# define SIMDE_BUG_CLANG_50932
# if !SIMDE_DETECT_CLANG_VERSION_CHECK(12,0,0)
# define SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS
# endif
# endif
# if defined(SIMDE_ARCH_X86) || defined(SIMDE_ARCH_AMD64)
# if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0)
# define SIMDE_BUG_CLANG_REV_298042 /* 6afc436a7817a52e78ae7bcdc3faafd460124cac */
# endif
# if SIMDE_DETECT_CLANG_VERSION_NOT(3,7,0)
# define SIMDE_BUG_CLANG_REV_234560 /* b929ad7b1726a32650a8051f69a747fb6836c540 */
# endif
# if SIMDE_DETECT_CLANG_VERSION_CHECK(3,8,0) && SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0)
# define SIMDE_BUG_CLANG_BAD_MADD
# endif
# if SIMDE_DETECT_CLANG_VERSION_CHECK(4,0,0) && SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0)
# define SIMDE_BUG_CLANG_REV_299346 /* ac9959eb533a58482ea4da6c4db1e635a98de384 */
# endif
# if SIMDE_DETECT_CLANG_VERSION_NOT(8,0,0)
# define SIMDE_BUG_CLANG_REV_344862 /* eae26bf73715994c2bd145f9b6dc3836aa4ffd4f */
# endif
# if HEDLEY_HAS_WARNING("-Wsign-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0)
# define SIMDE_BUG_CLANG_45931
# endif
# if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0)
# define SIMDE_BUG_CLANG_44589
# endif
# define SIMDE_BUG_CLANG_48673
# endif
# define SIMDE_BUG_CLANG_45959
# elif defined(HEDLEY_MSVC_VERSION)
# if defined(SIMDE_ARCH_X86)
# define SIMDE_BUG_MSVC_ROUND_EXTRACT
# endif
# elif defined(HEDLEY_INTEL_VERSION)
# define SIMDE_BUG_INTEL_857088
# elif defined(HEDLEY_MCST_LCC_VERSION)
# define SIMDE_BUG_MCST_LCC_MISSING_AVX_LOAD_STORE_M128_FUNCS
# define SIMDE_BUG_MCST_LCC_MISSING_CMOV_M256
# define SIMDE_BUG_MCST_LCC_FMA_WRONG_RESULT
# elif defined(HEDLEY_PGI_VERSION)
# define SIMDE_BUG_PGI_30104
# define SIMDE_BUG_PGI_30107
# define SIMDE_BUG_PGI_30106
# endif
#endif
/* GCC and Clang both have the same issue:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95144
* https://bugs.llvm.org/show_bug.cgi?id=45931
* This is just an easy way to work around it.
*/
#if \
(HEDLEY_HAS_WARNING("-Wsign-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(11,0,0)) || \
HEDLEY_GCC_VERSION_CHECK(4,3,0)
/* Evaluate `expr` with -Wsign-conversion suppressed.  The correct
 * sequence is: push the diagnostic state, disable the warning,
 * evaluate, then pop to restore.  (The previous ordering popped
 * before the pragma and pushed afterwards, which both failed to
 * silence the warning and corrupted the diagnostic stack.) */
# define SIMDE_BUG_IGNORE_SIGN_CONVERSION(expr) (__extension__ ({ \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("GCC diagnostic ignored \"-Wsign-conversion\"") \
__typeof__(expr) simde_bug_ignore_sign_conversion_v_= (expr); \
HEDLEY_DIAGNOSTIC_POP \
simde_bug_ignore_sign_conversion_v_; \
}))
#else
# define SIMDE_BUG_IGNORE_SIGN_CONVERSION(expr) (expr)
#endif
/* Usually the shift count is signed (for example, NEON or SSE).
* OTOH, unsigned is good for PPC (vec_srl uses unsigned), and the only option for E2K.
* Further info: https://github.com/simd-everywhere/simde/pull/700
*/
#if defined(SIMDE_ARCH_E2K) || defined(SIMDE_ARCH_POWER)
#define SIMDE_CAST_VECTOR_SHIFT_COUNT(width, value) HEDLEY_STATIC_CAST(uint##width##_t, (value))
#else
#define SIMDE_CAST_VECTOR_SHIFT_COUNT(width, value) HEDLEY_STATIC_CAST(int##width##_t, (value))
#endif
/* SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ */
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_COMMON_H) */
/* :: End simde-common.h :: */
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
#if defined(SIMDE_X86_MMX_NATIVE)
#define SIMDE_X86_MMX_USE_NATIVE_TYPE
#elif defined(SIMDE_X86_SSE_NATIVE)
#define SIMDE_X86_MMX_USE_NATIVE_TYPE
#endif
#if defined(SIMDE_X86_MMX_USE_NATIVE_TYPE)
#include <mmintrin.h>
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#include <arm_neon.h>
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
#include <loongson-mmiintrin.h>
#endif
#include <stdint.h>
#include <limits.h>
SIMDE_BEGIN_DECLS_
/* 64-bit MMX register emulation.  Every member aliases the same 8
 * bytes, so a value written through one view (e.g. i16[4]) can be read
 * back through another (e.g. u8[8]) — mirroring how MMX intrinsics
 * reinterpret __m64.  Native vector types are included only when the
 * corresponding ISA is available. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
/* GCC/Clang vector-extension views: enable element subscripting and
 * whole-vector arithmetic on the portable fallback path. */
SIMDE_ALIGN_TO_8 int8_t i8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 int16_t i16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 int32_t i32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 int64_t i64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 uint8_t u8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 uint16_t u16 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 uint32_t u32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 uint64_t u64 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 simde_float32 f32 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 int_fast32_t i32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_8 uint_fast32_t u32f SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
#else
/* Plain-array views for compilers without vector extensions. */
SIMDE_ALIGN_TO_8 int8_t i8[8];
SIMDE_ALIGN_TO_8 int16_t i16[4];
SIMDE_ALIGN_TO_8 int32_t i32[2];
SIMDE_ALIGN_TO_8 int64_t i64[1];
SIMDE_ALIGN_TO_8 uint8_t u8[8];
SIMDE_ALIGN_TO_8 uint16_t u16[4];
SIMDE_ALIGN_TO_8 uint32_t u32[2];
SIMDE_ALIGN_TO_8 uint64_t u64[1];
SIMDE_ALIGN_TO_8 simde_float32 f32[2];
SIMDE_ALIGN_TO_8 int_fast32_t i32f[8 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_8 uint_fast32_t u32f[8 / sizeof(uint_fast32_t)];
#endif
#if defined(SIMDE_X86_MMX_USE_NATIVE_TYPE)
/* Real hardware __m64 when MMX (or SSE) is available. */
__m64 n;
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON 64-bit "D" register views used by the ARM fast paths. */
int8x8_t neon_i8;
int16x4_t neon_i16;
int32x2_t neon_i32;
int64x1_t neon_i64;
uint8x8_t neon_u8;
uint16x4_t neon_u16;
uint32x2_t neon_u32;
uint64x1_t neon_u64;
float32x2_t neon_f32;
#endif
#if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
/* Loongson MMI views used by the MIPS fast paths. */
int8x8_t mmi_i8;
int16x4_t mmi_i16;
int32x2_t mmi_i32;
int64_t mmi_i64;
uint8x8_t mmi_u8;
uint16x4_t mmi_u16;
uint32x2_t mmi_u32;
uint64_t mmi_u64;
#endif
} simde__m64_private;
#if defined(SIMDE_X86_MMX_USE_NATIVE_TYPE)
typedef __m64 simde__m64;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef int32x2_t simde__m64;
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
typedef int32x2_t simde__m64;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef int32_t simde__m64 SIMDE_ALIGN_TO_8 SIMDE_VECTOR(8) SIMDE_MAY_ALIAS;
#else
typedef simde__m64_private simde__m64;
#endif
#if !defined(SIMDE_X86_MMX_USE_NATIVE_TYPE) && defined(SIMDE_ENABLE_NATIVE_ALIASES)
#define SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES
typedef simde__m64 __m64;
#endif
HEDLEY_STATIC_ASSERT(8 == sizeof(simde__m64), "__m64 size incorrect");
HEDLEY_STATIC_ASSERT(8 == sizeof(simde__m64_private), "__m64 size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m64) == 8, "simde__m64 is not 8-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m64_private) == 8, "simde__m64_private is not 8-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde__m64_from_private(simde__m64_private v) {
/* Reinterpret the private union as the public simde__m64 type.
 * simde_memcpy is the strict-aliasing-safe way to type-pun; both
 * types are statically asserted to be 8 bytes. */
simde__m64 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m64_private
simde__m64_to_private(simde__m64 v) {
/* Inverse of simde__m64_from_private: expose the union views of a
 * public simde__m64 value via an aliasing-safe byte copy. */
simde__m64_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
/* Generates a pair of converters between a SIMDe public type and a
 * native vector type stored in its private union:
 *   simde__<type>_from_<isax>_<fragment>(value)  — wrap native value
 *   simde__<type>_to_<isax>_<fragment>(value)    — unwrap to native
 * e.g. (m64, int8x8_t, neon, i8) yields simde__m64_from_neon_i8 /
 * simde__m64_to_neon_i8 backed by the union member neon_i8. */
#define SIMDE_X86_GENERATE_CONVERSION_FUNCTION(simde_type, source_type, isax, fragment) \
SIMDE_FUNCTION_ATTRIBUTES \
simde__##simde_type \
simde__##simde_type##_from_##isax##_##fragment(source_type value) { \
simde__##simde_type##_private r_; \
r_.isax##_##fragment = value; \
return simde__##simde_type##_from_private(r_); \
} \
\
SIMDE_FUNCTION_ATTRIBUTES \
source_type \
simde__##simde_type##_to_##isax##_##fragment(simde__##simde_type value) { \
simde__##simde_type##_private r_ = simde__##simde_type##_to_private(value); \
return r_.isax##_##fragment; \
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int8x8_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int16x4_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int32x2_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int64x1_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint8x8_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint16x4_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint32x2_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint64x1_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, float32x2_t, neon, f32)
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int8x8_t, mmi, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int16x4_t, mmi, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int32x2_t, mmi, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, int64_t, mmi, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint8x8_t, mmi, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint16x4_t, mmi, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint32x2_t, mmi, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m64, uint64_t, mmi, u64)
#endif /* defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_add_pi8 (simde__m64 a, simde__m64 b) {
/* Lane-wise wrapping addition of eight 8-bit integers (MMX paddb).
 * Dispatch order: native MMX, NEON, Loongson MMI, GCC vector ops,
 * then a scalar loop as the portable fallback. */
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_add_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vadd_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = paddb_s(a_.mmi_i8, b_.mmi_i8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = a_.i8 + b_.i8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = a_.i8[i] + b_.i8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddb(a, b) simde_mm_add_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_add_pi8(a, b) simde_mm_add_pi8(a, b)
# define _m_paddb(a, b) simde_m_paddb(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_add_pi16 (simde__m64 a, simde__m64 b) {
/* Lane-wise wrapping addition of four 16-bit integers (MMX paddw).
 * Same dispatch pattern as simde_mm_add_pi8, on the i16 views. */
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_add_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vadd_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = paddh_s(a_.mmi_i16, b_.mmi_i16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = a_.i16 + b_.i16;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] + b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddw(a, b) simde_mm_add_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_add_pi16(a, b) simde_mm_add_pi16(a, b)
# define _m_paddw(a, b) simde_mm_add_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_add_pi32 (simde__m64 a, simde__m64 b) {
/* Lane-wise wrapping addition of two 32-bit integers (MMX paddd).
 * Same dispatch pattern as simde_mm_add_pi8, on the i32 views. */
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_add_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vadd_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = paddw_s(a_.mmi_i32, b_.mmi_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 + b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] + b_.i32[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddd(a, b) simde_mm_add_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_add_pi32(a, b) simde_mm_add_pi32(a, b)
# define _m_paddd(a, b) simde_mm_add_pi32(a, b)
#endif
/* simde_mm_adds_pi8: saturating addition of eight signed 8-bit integers
 * (MMX paddsb). Results are clamped to [INT8_MIN, INT8_MAX] instead of
 * wrapping. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_adds_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_adds_pi8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vqadd_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = paddsb(a_.mmi_i8, b_.mmi_i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
/* Overflow/underflow are detected *before* adding so the signed
 * addition itself never overflows (which would be UB in C). */
if ((((b_.i8[i]) > 0) && ((a_.i8[i]) > (INT8_MAX - (b_.i8[i]))))) {
r_.i8[i] = INT8_MAX;
} else if ((((b_.i8[i]) < 0) && ((a_.i8[i]) < (INT8_MIN - (b_.i8[i]))))) {
r_.i8[i] = INT8_MIN;
} else {
r_.i8[i] = (a_.i8[i]) + (b_.i8[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddsb(a, b) simde_mm_adds_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_adds_pi8(a, b) simde_mm_adds_pi8(a, b)
# define _m_paddsb(a, b) simde_mm_adds_pi8(a, b)
#endif
/* simde_mm_adds_pu8: saturating addition of eight unsigned 8-bit integers
 * (MMX paddusb). Results are clamped to [0, UINT8_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_adds_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_adds_pu8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vqadd_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_u8 = paddusb(a_.mmi_u8, b_.mmi_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
/* Widen to at least 16 bits so the sum cannot wrap, then clamp. */
const uint_fast16_t x = HEDLEY_STATIC_CAST(uint_fast16_t, a_.u8[i]) + HEDLEY_STATIC_CAST(uint_fast16_t, b_.u8[i]);
if (x > UINT8_MAX)
r_.u8[i] = UINT8_MAX;
else
r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, x);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddusb(a, b) simde_mm_adds_pu8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_adds_pu8(a, b) simde_mm_adds_pu8(a, b)
# define _m_paddusb(a, b) simde_mm_adds_pu8(a, b)
#endif
/* simde_mm_adds_pi16: saturating addition of four signed 16-bit integers
 * (MMX paddsw). Results are clamped to [INT16_MIN, INT16_MAX] instead of
 * wrapping.
 *
 * Fix: the scalar fallback previously clamped with SHRT_MIN (<limits.h>),
 * which merely *happens* to equal INT16_MIN on common platforms and was
 * inconsistent with the INT16_MAX bound used on the line above (and with
 * the INT8_MIN/INT8_MAX pair in simde_mm_adds_pi8). Use the exact-width
 * <stdint.h> constant. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_adds_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_adds_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vqadd_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = paddsh(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
/* Detect overflow/underflow before adding so the signed addition
 * itself never overflows (signed overflow is UB in C). */
if ((((b_.i16[i]) > 0) && ((a_.i16[i]) > (INT16_MAX - (b_.i16[i]))))) {
r_.i16[i] = INT16_MAX;
} else if ((((b_.i16[i]) < 0) && ((a_.i16[i]) < (INT16_MIN - (b_.i16[i]))))) {
r_.i16[i] = INT16_MIN;
} else {
r_.i16[i] = (a_.i16[i]) + (b_.i16[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddsw(a, b) simde_mm_adds_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_adds_pi16(a, b) simde_mm_adds_pi16(a, b)
# define _m_paddsw(a, b) simde_mm_adds_pi16(a, b)
#endif
/* simde_mm_adds_pu16: saturating addition of four unsigned 16-bit integers
 * (MMX paddusw). Results are clamped to [0, UINT16_MAX].
 *
 * Fix: the scalar fallback's loop bound was computed from sizeof(r_.i16)
 * while the body indexes r_.u16 — same size, but inconsistent with every
 * other loop in this file; size the bound from the array actually used. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_adds_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_adds_pu16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vqadd_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_u16 = paddush(a_.mmi_u16, b_.mmi_u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
/* Widen to 32 bits so the sum cannot wrap, then clamp. */
const uint32_t x = a_.u16[i] + b_.u16[i];
if (x > UINT16_MAX)
r_.u16[i] = UINT16_MAX;
else
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, x);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_paddusw(a, b) simde_mm_adds_pu16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_adds_pu16(a, b) simde_mm_adds_pu16(a, b)
# define _m_paddusw(a, b) simde_mm_adds_pu16(a, b)
#endif
/* simde_mm_and_si64: bitwise AND of two 64-bit values (MMX pand). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_and_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_and_si64(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vand_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = a_.i64 & b_.i64;
#else
/* Bitwise op: lane width is irrelevant, so one 64-bit AND suffices. */
r_.i64[0] = a_.i64[0] & b_.i64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pand(a, b) simde_mm_and_si64(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_and_si64(a, b) simde_mm_and_si64(a, b)
# define _m_pand(a, b) simde_mm_and_si64(a, b)
#endif
/* simde_mm_andnot_si64: computes (~a) & b (MMX pandn). Note the operand
 * roles: it is the FIRST argument that is complemented. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_andnot_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_andnot_si64(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vbic computes first & ~second, so the operands are swapped here. */
r_.neon_i32 = vbic_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = pandn_sw(a_.mmi_i32, b_.mmi_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = ~a_.i32f & b_.i32f;
#else
r_.u64[0] = (~(a_.u64[0])) & (b_.u64[0]);
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pandn(a, b) simde_mm_andnot_si64(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_si64(a, b) simde_mm_andnot_si64(a, b)
# define _m_pandn(a, b) simde_mm_andnot_si64(a, b)
#endif
/* simde_mm_cmpeq_pi8: lane-wise equality compare of eight 8-bit integers
 * (MMX pcmpeqb). Each result lane is all-ones (~0) when equal, 0 otherwise,
 * matching the x86 mask convention. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpeq_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpeq_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vceq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = pcmpeqb_s(a_.mmi_i8, b_.mmi_i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] == b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpeqb(a, b) simde_mm_cmpeq_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_pi8(a, b) simde_mm_cmpeq_pi8(a, b)
# define _m_pcmpeqb(a, b) simde_mm_cmpeq_pi8(a, b)
#endif
/* simde_mm_cmpeq_pi16: lane-wise equality compare of four 16-bit integers
 * (MMX pcmpeqw); all-ones/zero mask per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpeq_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpeq_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vceq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = pcmpeqh_s(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpeqw(a, b) simde_mm_cmpeq_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_pi16(a, b) simde_mm_cmpeq_pi16(a, b)
# define _m_pcmpeqw(a, b) simde_mm_cmpeq_pi16(a, b)
#endif
/* simde_mm_cmpeq_pi32: lane-wise equality compare of two 32-bit integers
 * (MMX pcmpeqd); all-ones/zero mask per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpeq_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpeq_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = pcmpeqw_s(a_.mmi_i32, b_.mmi_i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] == b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpeqd(a, b) simde_mm_cmpeq_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_pi32(a, b) simde_mm_cmpeq_pi32(a, b)
# define _m_pcmpeqd(a, b) simde_mm_cmpeq_pi32(a, b)
#endif
/* simde_mm_cmpgt_pi8: lane-wise signed greater-than compare of eight
 * 8-bit integers (MMX pcmpgtb); all-ones/zero mask per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpgt_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpgt_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcgt_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = pcmpgtb_s(a_.mmi_i8, b_.mmi_i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpgtb(a, b) simde_mm_cmpgt_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_pi8(a, b) simde_mm_cmpgt_pi8(a, b)
# define _m_pcmpgtb(a, b) simde_mm_cmpgt_pi8(a, b)
#endif
/* simde_mm_cmpgt_pi16: lane-wise signed greater-than compare of four
 * 16-bit integers (MMX pcmpgtw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpgt_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpgt_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcgt_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = pcmpgth_s(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpgtw(a, b) simde_mm_cmpgt_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_pi16(a, b) simde_mm_cmpgt_pi16(a, b)
# define _m_pcmpgtw(a, b) simde_mm_cmpgt_pi16(a, b)
#endif
/* simde_mm_cmpgt_pi32: lane-wise signed greater-than compare of two
 * 32-bit integers (MMX pcmpgtd). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cmpgt_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cmpgt_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgt_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = pcmpgtw_s(a_.mmi_i32, b_.mmi_i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] > b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pcmpgtd(a, b) simde_mm_cmpgt_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_pi32(a, b) simde_mm_cmpgt_pi32(a, b)
# define _m_pcmpgtd(a, b) simde_mm_cmpgt_pi32(a, b)
#endif
/* simde_mm_cvtm64_si64: extract the 64-bit value from an __m64
 * (_mm_cvtm64_si64 / _m_to_int64). Native path requires AMD64 and is
 * skipped for PGI, which lacks the intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtm64_si64 (simde__m64 a) {
#if defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(__PGI)
return _mm_cvtm64_si64(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
/* Older clang warns on the NEON vector-type conversion here; the code is
 * intentional, so the warning is suppressed for clang < 10. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
#pragma clang diagnostic ignored "-Wvector-conversion"
#endif
return vget_lane_s64(a_.neon_i64, 0);
HEDLEY_DIAGNOSTIC_POP
#else
return a_.i64[0];
#endif
#endif
}
#define simde_m_to_int64(a) simde_mm_cvtm64_si64(a)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtm64_si64(a) simde_mm_cvtm64_si64(a)
# define _m_to_int64(a) simde_mm_cvtm64_si64(a)
#endif
/* simde_mm_cvtsi32_si64: widen a 32-bit integer into the low lane of an
 * __m64, zeroing the high lane (_mm_cvtsi32_si64 / _m_from_int). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtsi32_si64 (int32_t a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtsi32_si64(a);
#else
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int32_t av[2] = { a, 0 };
r_.neon_i32 = vld1_s32(av);
#else
r_.i32[0] = a;
r_.i32[1] = 0;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_from_int(a) simde_mm_cvtsi32_si64(a)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_si64(a) simde_mm_cvtsi32_si64(a)
# define _m_from_int(a) simde_mm_cvtsi32_si64(a)
#endif
/* simde_mm_cvtsi64_m64: wrap a 64-bit integer in an __m64
 * (_mm_cvtsi64_m64 / _m_from_int64). Native path: AMD64 only, not PGI. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtsi64_m64 (int64_t a) {
#if defined(SIMDE_X86_MMX_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(__PGI)
return _mm_cvtsi64_m64(a);
#else
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vld1_s64(&a);
#else
r_.i64[0] = a;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_from_int64(a) simde_mm_cvtsi64_m64(a)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_m64(a) simde_mm_cvtsi64_m64(a)
# define _m_from_int64(a) simde_mm_cvtsi64_m64(a)
#endif
/* simde_mm_cvtsi64_si32: extract the low 32-bit lane of an __m64. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtsi64_si32 (simde__m64 a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtsi64_si32(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
/* Same clang < 10 vector-conversion false positive as cvtm64_si64. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
#pragma clang diagnostic ignored "-Wvector-conversion"
#endif
return vget_lane_s32(a_.neon_i32, 0);
HEDLEY_DIAGNOSTIC_POP
#else
return a_.i32[0];
#endif
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_si32(a) simde_mm_cvtsi64_si32(a)
#endif
/* simde_mm_empty: clears the MMX state so the x87 FPU can be used again
 * (MMX emms). Only meaningful when real MMX registers are in play; on
 * every other platform there is no shared MMX/x87 state, so it is a
 * deliberate no-op. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_empty (void) {
#if defined(SIMDE_X86_MMX_NATIVE)
_mm_empty();
#else
/* noop */
#endif
}
#define simde_m_empty() simde_mm_empty()
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_empty() simde_mm_empty()
# define _m_empty() simde_mm_empty()
#endif
/* simde_mm_madd_pi16: multiply-add of signed 16-bit lanes (MMX pmaddwd).
 * Adjacent 16-bit products are summed pairwise into two 32-bit results:
 * r.i32[k] = a.i16[2k]*b.i16[2k] + a.i16[2k+1]*b.i16[2k+1]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_madd_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_madd_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply to 32 bits, then pairwise add collapses 4 -> 2. */
int32x4_t i1 = vmull_s16(a_.neon_i16, b_.neon_i16);
r_.neon_i32 = vpadd_s32(vget_low_s32(i1), vget_high_s32(i1));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = pmaddhw(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i += 2) {
r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) + (a_.i16[i + 1] * b_.i16[i + 1]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaddwd(a, b) simde_mm_madd_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_madd_pi16(a, b) simde_mm_madd_pi16(a, b)
# define _m_pmaddwd(a, b) simde_mm_madd_pi16(a, b)
#endif
/* simde_mm_mulhi_pi16: high half of the signed 16x16->32 product per lane
 * (MMX pmulhw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply, shift each 32-bit product right by 16, narrow. */
const int32x4_t t1 = vmull_s16(a_.neon_i16, b_.neon_i16);
const uint32x4_t t2 = vshrq_n_u32(vreinterpretq_u32_s32(t1), 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = pmulhh(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((a_.i16[i] * b_.i16[i]) >> 16));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhw(a, b) simde_mm_mulhi_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pi16(a, b) simde_mm_mulhi_pi16(a, b)
# define _m_pmulhw(a, b) simde_mm_mulhi_pi16(a, b)
#endif
/* simde_mm_mullo_pi16: low half of the signed 16x16->32 product per lane
 * (MMX pmullw). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mullo_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_mullo_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply then narrow keeps exactly the low 16 bits. */
const int32x4_t t1 = vmull_s16(a_.neon_i16, b_.neon_i16);
const uint16x4_t t2 = vmovn_u32(vreinterpretq_u32_s32(t1));
r_.neon_u16 = t2;
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = pmullh(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((a_.i16[i] * b_.i16[i]) & 0xffff));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmullw(a, b) simde_mm_mullo_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_mullo_pi16(a, b) simde_mm_mullo_pi16(a, b)
# define _m_pmullw(a, b) simde_mm_mullo_pi16(a, b)
#endif
/* simde_mm_or_si64: bitwise OR of two 64-bit values (MMX por). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_or_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_or_si64(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorr_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = a_.i64 | b_.i64;
#else
/* Bitwise op: lane width is irrelevant, one 64-bit OR suffices. */
r_.i64[0] = a_.i64[0] | b_.i64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_por(a, b) simde_mm_or_si64(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_or_si64(a, b) simde_mm_or_si64(a, b)
# define _m_por(a, b) simde_mm_or_si64(a, b)
#endif
/* simde_mm_packs_pi16: pack eight signed 16-bit lanes (four from a, four
 * from b) into eight signed 8-bit lanes with saturation (MMX packsswb).
 * a's lanes fill r.i8[0..3], b's fill r.i8[4..7]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_packs_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_packs_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vqmovn_s16 narrows with signed saturation in a single step. */
r_.neon_i8 = vqmovn_s16(vcombine_s16(a_.neon_i16, b_.neon_i16));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = packsshb(a_.mmi_i16, b_.mmi_i16);
#else
/* Low half: saturate a's four lanes into r.i8[0..3]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
if (a_.i16[i] < INT8_MIN) {
r_.i8[i] = INT8_MIN;
} else if (a_.i16[i] > INT8_MAX) {
r_.i8[i] = INT8_MAX;
} else {
r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, a_.i16[i]);
}
}
/* High half: saturate b's four lanes into r.i8[4..7]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
if (b_.i16[i] < INT8_MIN) {
r_.i8[i + 4] = INT8_MIN;
} else if (b_.i16[i] > INT8_MAX) {
r_.i8[i + 4] = INT8_MAX;
} else {
r_.i8[i + 4] = HEDLEY_STATIC_CAST(int8_t, b_.i16[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_packsswb(a, b) simde_mm_packs_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_packs_pi16(a, b) simde_mm_packs_pi16(a, b)
# define _m_packsswb(a, b) simde_mm_packs_pi16(a, b)
#endif
/* simde_mm_packs_pi32: pack four signed 32-bit lanes (two from a, two from
 * b) into four signed 16-bit lanes with saturation (MMX packssdw). a's
 * lanes fill r.i16[0..1], b's fill r.i16[2..3].
 *
 * Fixes (scalar fallback, behavior unchanged):
 * - Clamp with INT16_MIN instead of SHRT_MIN, matching the INT16_MAX bound
 *   on the adjacent branch and the exact-width constants used by
 *   simde_mm_packs_pi16 (SHRT_MIN only coincidentally equals INT16_MIN).
 * - Replace the magic "8 /" loop bound with the sizeof(x)/sizeof(x[0])
 *   idiom used by every other loop in this file (identical value). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_packs_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_packs_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vqmovn_s32 narrows with signed saturation in a single step. */
r_.neon_i16 = vqmovn_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = packsswh(a_.mmi_i32, b_.mmi_i32);
#else
/* Low half: saturate a's two lanes into r.i16[0..1]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i32) / sizeof(a_.i32[0])) ; i++) {
if (a_.i32[i] < INT16_MIN) {
r_.i16[i] = INT16_MIN;
} else if (a_.i32[i] > INT16_MAX) {
r_.i16[i] = INT16_MAX;
} else {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i32[i]);
}
}
/* High half: saturate b's two lanes into r.i16[2..3]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(b_.i32) / sizeof(b_.i32[0])) ; i++) {
if (b_.i32[i] < INT16_MIN) {
r_.i16[i + 2] = INT16_MIN;
} else if (b_.i32[i] > INT16_MAX) {
r_.i16[i + 2] = INT16_MAX;
} else {
r_.i16[i + 2] = HEDLEY_STATIC_CAST(int16_t, b_.i32[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_packssdw(a, b) simde_mm_packs_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_packs_pi32(a, b) simde_mm_packs_pi32(a, b)
# define _m_packssdw(a, b) simde_mm_packs_pi32(a, b)
#endif
/* simde_mm_packs_pu16: pack eight signed 16-bit lanes into eight UNSIGNED
 * 8-bit lanes with saturation (MMX packuswb): negative inputs clamp to 0,
 * inputs above 255 clamp to UINT8_MAX. a fills r.u8[0..3], b r.u8[4..7]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_packs_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_packs_pu16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
const int16x8_t t1 = vcombine_s16(a_.neon_i16, b_.neon_i16);
/* Set elements which are < 0 to 0 */
const int16x8_t t2 = vandq_s16(t1, vreinterpretq_s16_u16(vcgezq_s16(t1)));
/* Vector with all s16 elements set to UINT8_MAX */
const int16x8_t vmax = vmovq_n_s16(HEDLEY_STATIC_CAST(int16_t, UINT8_MAX));
/* Elements which are within the acceptable range */
const int16x8_t le_max = vandq_s16(t2, vreinterpretq_s16_u16(vcleq_s16(t2, vmax)))
;
const int16x8_t gt_max = vandq_s16(vmax, vreinterpretq_s16_u16(vcgtq_s16(t2, vmax)));
/* Final values as 16-bit integers */
const int16x8_t values = vorrq_s16(le_max, gt_max);
r_.neon_u8 = vmovn_u16(vreinterpretq_u16_s16(values));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_u8 = packushb(a_.mmi_u16, b_.mmi_u16);
#else
/* Low half: clamp a's lanes to [0, UINT8_MAX] into r.u8[0..3]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
if (a_.i16[i] > UINT8_MAX) {
r_.u8[i] = UINT8_MAX;
} else if (a_.i16[i] < 0) {
r_.u8[i] = 0;
} else {
r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, a_.i16[i]);
}
}
/* High half: clamp b's lanes into r.u8[4..7]. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
if (b_.i16[i] > UINT8_MAX) {
r_.u8[i + 4] = UINT8_MAX;
} else if (b_.i16[i] < 0) {
r_.u8[i + 4] = 0;
} else {
r_.u8[i + 4] = HEDLEY_STATIC_CAST(uint8_t, b_.i16[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_packuswb(a, b) simde_mm_packs_pu16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_packs_pu16(a, b) simde_mm_packs_pu16(a, b)
# define _m_packuswb(a, b) simde_mm_packs_pu16(a, b)
#endif
/* simde_mm_set_pi8: build an __m64 from eight 8-bit values. Note the MMX
 * argument convention: e7 is the HIGHEST lane, e0 the lowest, so the
 * in-memory order is e0..e7. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set_pi8 (int8_t e7, int8_t e6, int8_t e5, int8_t e4, int8_t e3, int8_t e2, int8_t e1, int8_t e0) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_set_pi8(e7, e6, e5, e4, e3, e2, e1, e0);
#else
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int8_t v[sizeof(r_.i8) / sizeof(r_.i8[0])] = { e0, e1, e2, e3, e4, e5, e6, e7 };
r_.neon_i8 = vld1_s8(v);
#else
r_.i8[0] = e0;
r_.i8[1] = e1;
r_.i8[2] = e2;
r_.i8[3] = e3;
r_.i8[4] = e4;
r_.i8[5] = e5;
r_.i8[6] = e6;
r_.i8[7] = e7;
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set_pi8(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_set_pi8(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* simde_x_mm_set_pu8: SIMDe-only unsigned variant of set_pi8 (no native
 * MMX equivalent exists, hence the casts through the signed intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_set_pu8 (uint8_t e7, uint8_t e6, uint8_t e5, uint8_t e4, uint8_t e3, uint8_t e2, uint8_t e1, uint8_t e0) {
simde__m64_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.n = _mm_set_pi8(
HEDLEY_STATIC_CAST(int8_t, e7),
HEDLEY_STATIC_CAST(int8_t, e6),
HEDLEY_STATIC_CAST(int8_t, e5),
HEDLEY_STATIC_CAST(int8_t, e4),
HEDLEY_STATIC_CAST(int8_t, e3),
HEDLEY_STATIC_CAST(int8_t, e2),
HEDLEY_STATIC_CAST(int8_t, e1),
HEDLEY_STATIC_CAST(int8_t, e0));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint8_t v[sizeof(r_.u8) / sizeof(r_.u8[0])] = { e0, e1, e2, e3, e4, e5, e6, e7 };
r_.neon_u8 = vld1_u8(v);
#else
r_.u8[0] = e0;
r_.u8[1] = e1;
r_.u8[2] = e2;
r_.u8[3] = e3;
r_.u8[4] = e4;
r_.u8[5] = e5;
r_.u8[6] = e6;
r_.u8[7] = e7;
#endif
return simde__m64_from_private(r_);
}
/* simde_mm_set_pi16: build an __m64 from four 16-bit values (e3 = highest
 * lane, e0 = lowest). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set_pi16 (int16_t e3, int16_t e2, int16_t e1, int16_t e0) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_set_pi16(e3, e2, e1, e0);
#else
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int16_t v[sizeof(r_.i16) / sizeof(r_.i16[0])] = { e0, e1, e2, e3 };
r_.neon_i16 = vld1_s16(v);
#else
r_.i16[0] = e0;
r_.i16[1] = e1;
r_.i16[2] = e2;
r_.i16[3] = e3;
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set_pi16(e3, e2, e1, e0) simde_mm_set_pi16(e3, e2, e1, e0)
#endif
/* simde_x_mm_set_pu16: SIMDe-only unsigned variant of set_pi16. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_set_pu16 (uint16_t e3, uint16_t e2, uint16_t e1, uint16_t e0) {
simde__m64_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.n = _mm_set_pi16(
HEDLEY_STATIC_CAST(int16_t, e3),
HEDLEY_STATIC_CAST(int16_t, e2),
HEDLEY_STATIC_CAST(int16_t, e1),
HEDLEY_STATIC_CAST(int16_t, e0)
);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint16_t v[sizeof(r_.u16) / sizeof(r_.u16[0])] = { e0, e1, e2, e3 };
r_.neon_u16 = vld1_u16(v);
#else
r_.u16[0] = e0;
r_.u16[1] = e1;
r_.u16[2] = e2;
r_.u16[3] = e3;
#endif
return simde__m64_from_private(r_);
}
/* simde_x_mm_set_pu32: SIMDe-only unsigned variant of set_pi32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_set_pu32 (uint32_t e1, uint32_t e0) {
simde__m64_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.n = _mm_set_pi32(
HEDLEY_STATIC_CAST(int32_t, e1),
HEDLEY_STATIC_CAST(int32_t, e0));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32_t v[sizeof(r_.u32) / sizeof(r_.u32[0])] = { e0, e1 };
r_.neon_u32 = vld1_u32(v);
#else
r_.u32[0] = e0;
r_.u32[1] = e1;
#endif
return simde__m64_from_private(r_);
}
/* simde_mm_set_pi32: build an __m64 from two 32-bit values (e1 = high
 * lane, e0 = low lane). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set_pi32 (int32_t e1, int32_t e0) {
simde__m64_private r_;
#if defined(SIMDE_X86_MMX_NATIVE)
r_.n = _mm_set_pi32(e1, e0);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int32_t v[sizeof(r_.i32) / sizeof(r_.i32[0])] = { e0, e1 };
r_.neon_i32 = vld1_s32(v);
#else
r_.i32[0] = e0;
r_.i32[1] = e1;
#endif
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set_pi32(e1, e0) simde_mm_set_pi32(e1, e0)
#endif
/* simde_x_mm_set_pi64: SIMDe-only helper filling the whole 64-bit lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_set_pi64 (int64_t e0) {
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const int64_t v[sizeof(r_.i64) / sizeof(r_.i64[0])] = { e0 };
r_.neon_i64 = vld1_s64(v);
#else
r_.i64[0] = e0;
#endif
return simde__m64_from_private(r_);
}
/* simde_x_mm_set_f32x2: SIMDe-only helper packing two 32-bit floats
 * (e1 = high lane, e0 = low lane). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_set_f32x2 (simde_float32 e1, simde_float32 e0) {
simde__m64_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const simde_float32 v[sizeof(r_.f32) / sizeof(r_.f32[0])] = { e0, e1 };
r_.neon_f32 = vld1_f32(v);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
#endif
return simde__m64_from_private(r_);
}
/* simde_mm_set1_pi8: broadcast one 8-bit value to all eight lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set1_pi8 (int8_t a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_set1_pi8(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde__m64_private r_;
r_.neon_i8 = vmov_n_s8(a);
return simde__m64_from_private(r_);
#else
return simde_mm_set_pi8(a, a, a, a, a, a, a, a);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set1_pi8(a) simde_mm_set1_pi8(a)
#endif
/* simde_mm_set1_pi16: broadcast one 16-bit value to all four lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set1_pi16 (int16_t a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_set1_pi16(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde__m64_private r_;
r_.neon_i16 = vmov_n_s16(a);
return simde__m64_from_private(r_);
#else
return simde_mm_set_pi16(a, a, a, a);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set1_pi16(a) simde_mm_set1_pi16(a)
#endif
/* simde_mm_set1_pi32: broadcast one 32-bit value to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_set1_pi32 (int32_t a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_set1_pi32(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde__m64_private r_;
r_.neon_i32 = vmov_n_s32(a);
return simde__m64_from_private(r_);
#else
return simde_mm_set_pi32(a, a);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_set1_pi32(a) simde_mm_set1_pi32(a)
#endif
/* simde_mm_setr_pi8: reversed-argument set — e7 becomes the LOWEST lane.
 * Implemented by calling set_pi8 with the arguments flipped. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_setr_pi8 (int8_t e7, int8_t e6, int8_t e5, int8_t e4, int8_t e3, int8_t e2, int8_t e1, int8_t e0) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_setr_pi8(e7, e6, e5, e4, e3, e2, e1, e0);
#else
return simde_mm_set_pi8(e0, e1, e2, e3, e4, e5, e6, e7);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_setr_pi8(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_setr_pi8(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* simde_mm_setr_pi16: reversed-argument set_pi16. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_setr_pi16 (int16_t e3, int16_t e2, int16_t e1, int16_t e0) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_setr_pi16(e3, e2, e1, e0);
#else
return simde_mm_set_pi16(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_setr_pi16(e3, e2, e1, e0) simde_mm_setr_pi16(e3, e2, e1, e0)
#endif
/* simde_mm_setr_pi32: reversed-argument set_pi32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_setr_pi32 (int32_t e1, int32_t e0) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_setr_pi32(e1, e0);
#else
return simde_mm_set_pi32(e0, e1);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_setr_pi32(e1, e0) simde_mm_setr_pi32(e1, e0)
#endif
/* simde_mm_setzero_si64: all-zero __m64. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_setzero_si64 (void) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_setzero_si64();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
simde__m64_private r_;
r_.neon_u32 = vmov_n_u32(0);
return simde__m64_from_private(r_);
#else
return simde_mm_set_pi32(0, 0);
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_si64() simde_mm_setzero_si64()
#endif
/* simde_x_mm_load_si64: SIMDe-only aligned 64-bit load. memcpy keeps it
 * free of strict-aliasing problems; SIMDE_ALIGN_ASSUME_LIKE lets the
 * compiler assume simde__m64 alignment for better codegen. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_load_si64 (const void* mem_addr) {
simde__m64 r;
simde_memcpy(&r, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m64), sizeof(r));
return r;
}
/* simde_x_mm_loadu_si64: SIMDe-only unaligned 64-bit load. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_loadu_si64 (const void* mem_addr) {
simde__m64 r;
simde_memcpy(&r, mem_addr, sizeof(r));
return r;
}
/* simde_x_mm_store_si64: SIMDe-only aligned 64-bit store. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_x_mm_store_si64 (void* mem_addr, simde__m64 value) {
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m64), &value, sizeof(value));
}
/* simde_x_mm_storeu_si64: SIMDe-only unaligned 64-bit store. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_x_mm_storeu_si64 (void* mem_addr, simde__m64 value) {
simde_memcpy(mem_addr, &value, sizeof(value));
}
/* simde_x_mm_setone_si64: SIMDe-only all-ones __m64 (broadcast ~0). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_x_mm_setone_si64 (void) {
return simde_mm_set1_pi32(~INT32_C(0));
}
/* simde_mm_sll_pi16: shift four 16-bit lanes left by the scalar count held
 * in the low 64 bits of `count` (MMX psllw). Per the x86 instruction, a
 * count greater than 15 zeroes the whole result. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sll_pi16 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sll_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
/* clang < 10 false positive on the NEON vector conversion. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
#pragma clang diagnostic ignored "-Wvector-conversion"
#endif
r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(HEDLEY_STATIC_CAST(int16_t, vget_lane_u64(count_.neon_u64, 0))))
;
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT)
/* Workaround branch: guard out-of-range counts explicitly because the
 * affected clang/POWER9 combination miscompiles 16x4 vector shifts. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 15))
return simde_mm_setzero_si64();
r_.i16 = a_.i16 << HEDLEY_STATIC_CAST(int16_t, count_.u64[0]);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 << count_.u64[0];
#else
/* x86 semantics: shift count > 15 produces all zeros. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 15)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i] << count_.u64[0]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psllw(a, count) simde_mm_sll_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sll_pi16(a, count) simde_mm_sll_pi16(a, count)
# define _m_psllw(a, count) simde_mm_sll_pi16(a, count)
#endif
/* simde_mm_sll_pi32: shift two 32-bit lanes left by a scalar count (MMX
 * pslld). Counts greater than 31 zero the result in the scalar path. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sll_pi32 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sll_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
/* clang < 10 false positive on the NEON vector conversion. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
#pragma clang diagnostic ignored "-Wvector-conversion"
#endif
r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32(HEDLEY_STATIC_CAST(int32_t, vget_lane_u64(count_.neon_u64, 0))))
;
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 << count_.u64[0];
#else
/* x86 semantics: shift count > 31 produces all zeros. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 31)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] << count_.u64[0];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pslld(a, count) simde_mm_sll_pi32(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sll_pi32(a, count) simde_mm_sll_pi32(a, count)
# define _m_pslld(a, count) simde_mm_sll_pi32(a, count)
#endif
/* simde_mm_slli_pi16 (MMX PSLLWI): shift each 16-bit lane of `a` left by the
 * immediate `count`, shifting in zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_slli_pi16 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_slli_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT)
/* clang on POWER9 miscompiles 16x4 shifts; handle counts > 15 (defined by
 * x86 to produce zero) explicitly before the vector shift. */
if (HEDLEY_UNLIKELY(count > 15))
return simde_mm_setzero_si64();
r_.i16 = a_.i16 << HEDLEY_STATIC_CAST(int16_t, count);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 << count;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16((int16_t) count));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
/* Fixed: previously passed `b_.mmi_i16`, but no `b_` exists in this
 * function (copy/paste from the two-vector PSLLW variant), which failed
 * to compile under Loongson MMI. Pass the immediate count, matching the
 * other Loongson immediate-shift branches (cf. simde_mm_srai_pi16). */
r_.mmi_i16 = psllh_s(a_.mmi_i16, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i] << count);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psllwi(a, count) simde_mm_slli_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_slli_pi16(a, count) simde_mm_slli_pi16(a, count)
# define _m_psllwi(a, count) simde_mm_slli_pi16(a, count)
#endif
/* simde_mm_slli_pi32 (MMX PSLLDI): shift each 32-bit lane of `a` left by the
 * immediate `count`, shifting in zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_slli_pi32 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_slli_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 << count;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32((int32_t) count));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
/* Fixed: previously passed `b_.mmi_i32`, but no `b_` exists in this
 * function (copy/paste from the two-vector PSLLD variant), which failed
 * to compile under Loongson MMI. Pass the immediate count, matching the
 * other Loongson immediate-shift branches (cf. simde_mm_srai_pi32). */
r_.mmi_i32 = psllw_s(a_.mmi_i32, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] << count;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pslldi(a, b) simde_mm_slli_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_slli_pi32(a, count) simde_mm_slli_pi32(a, count)
# define _m_pslldi(a, count) simde_mm_slli_pi32(a, count)
#endif
/* simde_mm_slli_si64 (MMX PSLLQI): shift the whole 64-bit value left by the
 * immediate `count`, shifting in zeros.
 * NOTE(review): unlike the register-count variants there is no count > 63
 * guard here, so oversized counts are UB in the C fallbacks — presumably
 * callers pass in-range immediates; TODO confirm against upstream. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_slli_si64 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_slli_si64(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i64 = a_.i64 << count;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vshl_s64(a_.neon_i64, vmov_n_s64((int64_t) count));
#else
r_.u64[0] = a_.u64[0] << count;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psllqi(a, count) simde_mm_slli_si64(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_slli_si64(a, count) simde_mm_slli_si64(a, count)
# define _m_psllqi(a, count) simde_mm_slli_si64(a, count)
#endif
/* simde_mm_sll_si64 (MMX PSLLQ): shift the whole 64-bit value left by the
 * count held in `count`, shifting in zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sll_si64 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sll_si64(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vshl_s64(a_.neon_i64, count_.neon_i64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Element-wise vector shift; the single i64 lane is shifted by count's lane. */
r_.i64 = a_.i64 << count_.i64;
#else
/* x86 defines counts > 63 to zero the result. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 63)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
r_.u64[0] = a_.u64[0] << count_.u64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psllq(a, count) simde_mm_sll_si64(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sll_si64(a, count) simde_mm_sll_si64(a, count)
# define _m_psllq(a, count) simde_mm_sll_si64(a, count)
#endif
/* simde_mm_srl_pi16 (MMX PSRLW): logical right shift of each 16-bit lane of
 * `a` by the count held in the low 64 bits of `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srl_pi16 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_srl_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_BUG_CLANG_POWER9_16x4_BAD_SHIFT)
/* clang on POWER9 miscompiles 16x4 shifts; handle counts > 15 (defined by
 * x86 to produce zero) explicitly before the vector shift. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 15))
return simde_mm_setzero_si64();
r_.u16 = a_.u16 >> HEDLEY_STATIC_CAST(uint16_t, count_.u64[0]);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = a_.u16 >> count_.u64[0];
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by a negated count instead. */
r_.neon_u16 = vshl_u16(a_.neon_u16, vmov_n_s16(-((int16_t) vget_lane_u64(count_.neon_u64, 0))))
;
#else
/* Portable fallback: counts wider than the lane zero the whole result. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 15)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.u16) / sizeof(r_.u16[0]) ; i++) {
r_.u16[i] = a_.u16[i] >> count_.u64[0];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrlw(a, count) simde_mm_srl_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srl_pi16(a, count) simde_mm_srl_pi16(a, count)
# define _m_psrlw(a, count) simde_mm_srl_pi16(a, count)
#endif
/* simde_mm_srl_pi32 (MMX PSRLD): logical right shift of each 32-bit lane of
 * `a` by the count held in the low 64 bits of `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srl_pi32 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_srl_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 >> count_.u64[0];
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by a negated count instead. */
r_.neon_u32 = vshl_u32(a_.neon_u32, vmov_n_s32(-((int32_t) vget_lane_u64(count_.neon_u64, 0))))
;
#else
/* Portable fallback: counts wider than the lane zero the whole result. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 31)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.u32) / sizeof(r_.u32[0]) ; i++) {
r_.u32[i] = a_.u32[i] >> count_.u64[0];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrld(a, count) simde_mm_srl_pi32(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srl_pi32(a, count) simde_mm_srl_pi32(a, count)
# define _m_psrld(a, count) simde_mm_srl_pi32(a, count)
#endif
/* simde_mm_srli_pi16 (MMX PSRLWI): logical right shift of each 16-bit lane
 * of `a` by the immediate `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srli_pi16 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_srli_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = a_.u16 >> count;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_u16 = vshl_u16(a_.neon_u16, vmov_n_s16(-((int16_t) count)));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
/* Fixed: previously passed `b_.mmi_i16`, but no `b_` exists in this
 * function (copy/paste from the two-vector PSRLW variant), which failed
 * to compile under Loongson MMI. Pass the immediate count, matching the
 * other Loongson immediate-shift branches (cf. simde_mm_srai_pi16). */
r_.mmi_i16 = psrlh_s(a_.mmi_i16, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = a_.u16[i] >> count;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrlwi(a, count) simde_mm_srli_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srli_pi16(a, count) simde_mm_srli_pi16(a, count)
# define _m_psrlwi(a, count) simde_mm_srli_pi16(a, count)
#endif
/* simde_mm_srli_pi32 (MMX PSRLDI): logical right shift of each 32-bit lane
 * of `a` by the immediate `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srli_pi32 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_srli_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 >> count;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_u32 = vshl_u32(a_.neon_u32, vmov_n_s32(-((int32_t) count)));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
/* Fixed: previously passed `b_.mmi_i32`, but no `b_` exists in this
 * function (copy/paste from the two-vector PSRLD variant), which failed
 * to compile under Loongson MMI. Pass the immediate count, matching the
 * other Loongson immediate-shift branches (cf. simde_mm_srai_pi32). */
r_.mmi_i32 = psrlw_s(a_.mmi_i32, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] >> count;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrldi(a, count) simde_mm_srli_pi32(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srli_pi32(a, count) simde_mm_srli_pi32(a, count)
# define _m_psrldi(a, count) simde_mm_srli_pi32(a, count)
#endif
/* simde_mm_srli_si64 (MMX PSRLQI): logical right shift of the whole 64-bit
 * value by the immediate `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srli_si64 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_srli_si64(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_u64 = vshl_u64(a_.neon_u64, vmov_n_s64(-count));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u64 = a_.u64 >> count;
#else
r_.u64[0] = a_.u64[0] >> count;
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrlqi(a, count) simde_mm_srli_si64(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srli_si64(a, count) simde_mm_srli_si64(a, count)
# define _m_psrlqi(a, count) simde_mm_srli_si64(a, count)
#endif
/* simde_mm_srl_si64 (MMX PSRLQ): logical right shift of the whole 64-bit
 * value by the count held in `count`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srl_si64 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_srl_si64(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_u64 = vshl_u64(a_.neon_u64, vneg_s64(count_.neon_i64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.u64 = a_.u64 >> count_.u64;
#else
/* x86 defines counts > 63 to zero the result. */
if (HEDLEY_UNLIKELY(count_.u64[0] > 63)) {
simde_memset(&r_, 0, sizeof(r_));
return simde__m64_from_private(r_);
}
r_.u64[0] = a_.u64[0] >> count_.u64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrlq(a, count) simde_mm_srl_si64(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srl_si64(a, count) simde_mm_srl_si64(a, count)
# define _m_psrlq(a, count) simde_mm_srl_si64(a, count)
#endif
/* simde_mm_srai_pi16 (MMX PSRAWI): arithmetic (sign-extending) right shift
 * of each 16-bit lane of `a` by the immediate `count`.
 * NOTE(review): `count & 0xff` presumably mirrors the 8-bit immediate
 * encoding; masked counts >= 16 still invoke UB in the C shifts here —
 * TODO confirm callers keep count in [0, 15]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srai_pi16 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_srai_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 >> (count & 0xff);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(-HEDLEY_STATIC_CAST(int16_t, count)));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = psrah_s(a_.mmi_i16, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] >> (count & 0xff);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrawi(a, count) simde_mm_srai_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srai_pi16(a, count) simde_mm_srai_pi16(a, count)
# define _m_psrawi(a, count) simde_mm_srai_pi16(a, count)
#endif
/* simde_mm_srai_pi32 (MMX PSRADI): arithmetic (sign-extending) right shift
 * of each 32-bit lane of `a` by the immediate `count`.
 * NOTE(review): `count & 0xff` presumably mirrors the 8-bit immediate
 * encoding; masked counts >= 32 still invoke UB in the C shifts here —
 * TODO confirm callers keep count in [0, 31]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_srai_pi32 (simde__m64 a, int count) {
#if defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_srai_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 >> (count & 0xff);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32(-HEDLEY_STATIC_CAST(int32_t, count)));
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = psraw_s(a_.mmi_i32, count);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] >> (count & 0xff);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psradi(a, count) simde_mm_srai_pi32(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_srai_pi32(a, count) simde_mm_srai_pi32(a, count)
# define _m_psradi(a, count) simde_mm_srai_pi32(a, count)
#endif
/* simde_mm_sra_pi16 (MMX PSRAW): arithmetic (sign-extending) right shift of
 * each 16-bit lane of `a` by the count held in the low 64 bits of `count`.
 * Counts above 15 are clamped to 15, which matches the hardware's sign-fill
 * behavior for oversized counts. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sra_pi16 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sra_pi16(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
/* Fixed: compare the count as unsigned (like simde_mm_sra_pi32 below);
 * the previous signed i64 compare let counts with the high bit set
 * through as negative C shift amounts, which is undefined behavior. */
const int cnt = HEDLEY_STATIC_CAST(int, (count_.u64[0] > 15 ? 15 : count_.u64[0]));
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 >> cnt;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_i16 = vshl_s16(a_.neon_i16, vmov_n_s16(-HEDLEY_STATIC_CAST(int16_t, vget_lane_u64(count_.neon_u64, 0))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] >> cnt;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psraw(a, count) simde_mm_sra_pi16(a, count)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sra_pi16(a, count) simde_mm_sra_pi16(a, count)
# define _m_psraw(a, count) simde_mm_sra_pi16(a, count)
#endif
/* simde_mm_sra_pi32 (MMX PSRAD): arithmetic (sign-extending) right shift of
 * each 32-bit lane of `a` by the count held in the low 64 bits of `count`.
 * Counts above 31 are clamped to 31, which matches the hardware's sign-fill
 * behavior for oversized counts. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sra_pi32 (simde__m64 a, simde__m64 count) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sra_pi32(a, count);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private count_ = simde__m64_to_private(count);
const int32_t cnt = (count_.u64[0] > 31) ? 31 : HEDLEY_STATIC_CAST(int32_t, count_.u64[0]);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 >> cnt;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift: shift left by the negated count. */
r_.neon_i32 = vshl_s32(a_.neon_i32, vmov_n_s32(-HEDLEY_STATIC_CAST(int32_t, vget_lane_u64(count_.neon_u64, 0))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] >> cnt;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psrad(a, b) simde_mm_sra_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sra_pi32(a, count) simde_mm_sra_pi32(a, count)
# define _m_psrad(a, count) simde_mm_sra_pi32(a, count)
#endif
/* simde_mm_sub_pi8 (MMX PSUBB): lane-wise wrapping subtraction of eight
 * 8-bit integers (a - b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sub_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sub_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vsub_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = psubb_s(a_.mmi_i8, b_.mmi_i8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = a_.i8 - b_.i8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = a_.i8[i] - b_.i8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubb(a, b) simde_mm_sub_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sub_pi8(a, b) simde_mm_sub_pi8(a, b)
# define _m_psubb(a, b) simde_mm_sub_pi8(a, b)
#endif
/* simde_mm_sub_pi16 (MMX PSUBW): lane-wise wrapping subtraction of four
 * 16-bit integers (a - b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sub_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sub_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vsub_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = psubh_s(a_.mmi_i16, b_.mmi_i16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = a_.i16 - b_.i16;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] - b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubw(a, b) simde_mm_sub_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sub_pi16(a, b) simde_mm_sub_pi16(a, b)
# define _m_psubw(a, b) simde_mm_sub_pi16(a, b)
#endif
/* simde_mm_sub_pi32 (MMX PSUBD): lane-wise wrapping subtraction of two
 * 32-bit integers (a - b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sub_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_sub_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vsub_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = psubw_s(a_.mmi_i32, b_.mmi_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 - b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] - b_.i32[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubd(a, b) simde_mm_sub_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_sub_pi32(a, b) simde_mm_sub_pi32(a, b)
# define _m_psubd(a, b) simde_mm_sub_pi32(a, b)
#endif
/* simde_mm_subs_pi8 (MMX PSUBSB): lane-wise signed saturating subtraction of
 * eight 8-bit integers; results clamp to [INT8_MIN, INT8_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_subs_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_subs_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vqsub_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = psubsb(a_.mmi_i8, b_.mmi_i8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
/* Overflow checks are phrased to avoid computing an out-of-range value. */
if (((b_.i8[i]) > 0 && (a_.i8[i]) < INT8_MIN + (b_.i8[i]))) {
r_.i8[i] = INT8_MIN;
} else if ((b_.i8[i]) < 0 && (a_.i8[i]) > INT8_MAX + (b_.i8[i])) {
r_.i8[i] = INT8_MAX;
} else {
r_.i8[i] = (a_.i8[i]) - (b_.i8[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubsb(a, b) simde_mm_subs_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_subs_pi8(a, b) simde_mm_subs_pi8(a, b)
# define _m_psubsb(a, b) simde_mm_subs_pi8(a, b)
#endif
/* simde_mm_subs_pu8 (MMX PSUBUSB): lane-wise unsigned saturating subtraction
 * of eight 8-bit integers; results clamp to [0, UINT8_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_subs_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_subs_pu8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vqsub_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_u8 = psubusb(a_.mmi_u8, b_.mmi_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
/* Widen to a signed type so underflow is observable before clamping. */
const int32_t x = a_.u8[i] - b_.u8[i];
if (x < 0) {
r_.u8[i] = 0;
} else if (x > UINT8_MAX) {
r_.u8[i] = UINT8_MAX;
} else {
r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, x);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubusb(a, b) simde_mm_subs_pu8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_subs_pu8(a, b) simde_mm_subs_pu8(a, b)
# define _m_psubusb(a, b) simde_mm_subs_pu8(a, b)
#endif
/* simde_mm_subs_pi16 (MMX PSUBSW): lane-wise signed saturating subtraction
 * of four 16-bit integers; results clamp to [INT16_MIN, INT16_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_subs_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_subs_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vqsub_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = psubsh(a_.mmi_i16, b_.mmi_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
/* Overflow checks are phrased to avoid computing an out-of-range value.
 * Uses INT16_MIN/INT16_MAX (was SHRT_MIN, a limits.h `short` constant)
 * for consistency with the stdint constants used throughout this file
 * and with simde_mm_subs_pi8 above. */
if (((b_.i16[i]) > 0 && (a_.i16[i]) < INT16_MIN + (b_.i16[i]))) {
r_.i16[i] = INT16_MIN;
} else if ((b_.i16[i]) < 0 && (a_.i16[i]) > INT16_MAX + (b_.i16[i])) {
r_.i16[i] = INT16_MAX;
} else {
r_.i16[i] = (a_.i16[i]) - (b_.i16[i]);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubsw(a, b) simde_mm_subs_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_subs_pi16(a, b) simde_mm_subs_pi16(a, b)
# define _m_psubsw(a, b) simde_mm_subs_pi16(a, b)
#endif
/* simde_mm_subs_pu16 (MMX PSUBUSW): lane-wise unsigned saturating
 * subtraction of four 16-bit integers; results clamp to [0, UINT16_MAX]. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_subs_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_subs_pu16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vqsub_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_u16 = psubush(a_.mmi_u16, b_.mmi_u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
/* Widen to a signed type so underflow is observable before clamping. */
const int x = a_.u16[i] - b_.u16[i];
if (x < 0) {
r_.u16[i] = 0;
} else if (x > UINT16_MAX) {
r_.u16[i] = UINT16_MAX;
} else {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, x);
}
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psubusw(a, b) simde_mm_subs_pu16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_subs_pu16(a, b) simde_mm_subs_pu16(a, b)
# define _m_psubusw(a, b) simde_mm_subs_pu16(a, b)
#endif
/* simde_mm_unpackhi_pi8 (MMX PUNPCKHBW): interleave the upper four 8-bit
 * lanes of `a` and `b` -> { a4, b4, a5, b5, a6, b6, a7, b7 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpackhi_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpackhi_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i8 = vzip2_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.i8, b_.i8, 4, 12, 5, 13, 6, 14, 7, 15);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = punpckhbh_s(a_.mmi_i8, b_.mmi_i8);
#else
r_.i8[0] = a_.i8[4];
r_.i8[1] = b_.i8[4];
r_.i8[2] = a_.i8[5];
r_.i8[3] = b_.i8[5];
r_.i8[4] = a_.i8[6];
r_.i8[5] = b_.i8[6];
r_.i8[6] = a_.i8[7];
r_.i8[7] = b_.i8[7];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpckhbw(a, b) simde_mm_unpackhi_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_pi8(a, b) simde_mm_unpackhi_pi8(a, b)
# define _m_punpckhbw(a, b) simde_mm_unpackhi_pi8(a, b)
#endif
/* simde_mm_unpackhi_pi16 (MMX PUNPCKHWD): interleave the upper two 16-bit
 * lanes of `a` and `b` -> { a2, b2, a3, b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpackhi_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpackhi_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i16 = vzip2_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = punpckhhw_s(a_.mmi_i16, b_.mmi_i16);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 2, 6, 3, 7);
#else
r_.i16[0] = a_.i16[2];
r_.i16[1] = b_.i16[2];
r_.i16[2] = a_.i16[3];
r_.i16[3] = b_.i16[3];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpckhwd(a, b) simde_mm_unpackhi_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_pi16(a, b) simde_mm_unpackhi_pi16(a, b)
# define _m_punpckhwd(a, b) simde_mm_unpackhi_pi16(a, b)
#endif
/* simde_mm_unpackhi_pi32 (MMX PUNPCKHDQ): interleave the upper 32-bit lanes
 * of `a` and `b` -> { a1, b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpackhi_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpackhi_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i32 = vzip2_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = punpckhwd_s(a_.mmi_i32, b_.mmi_i32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 1, 3);
#else
r_.i32[0] = a_.i32[1];
r_.i32[1] = b_.i32[1];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpckhdq(a, b) simde_mm_unpackhi_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_pi32(a, b) simde_mm_unpackhi_pi32(a, b)
# define _m_punpckhdq(a, b) simde_mm_unpackhi_pi32(a, b)
#endif
/* simde_mm_unpacklo_pi8 (MMX PUNPCKLBW): interleave the lower four 8-bit
 * lanes of `a` and `b` -> { a0, b0, a1, b1, a2, b2, a3, b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpacklo_pi8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpacklo_pi8(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i8 = vzip1_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i8 = punpcklbh_s(a_.mmi_i8, b_.mmi_i8);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 8, a_.i8, b_.i8, 0, 8, 1, 9, 2, 10, 3, 11);
#else
r_.i8[0] = a_.i8[0];
r_.i8[1] = b_.i8[0];
r_.i8[2] = a_.i8[1];
r_.i8[3] = b_.i8[1];
r_.i8[4] = a_.i8[2];
r_.i8[5] = b_.i8[2];
r_.i8[6] = a_.i8[3];
r_.i8[7] = b_.i8[3];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpcklbw(a, b) simde_mm_unpacklo_pi8(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_pi8(a, b) simde_mm_unpacklo_pi8(a, b)
# define _m_punpcklbw(a, b) simde_mm_unpacklo_pi8(a, b)
#endif
/* simde_mm_unpacklo_pi16 (MMX PUNPCKLWD): interleave the lower two 16-bit
 * lanes of `a` and `b` -> { a0, b0, a1, b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpacklo_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpacklo_pi16(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i16 = vzip1_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i16 = punpcklhw_s(a_.mmi_i16, b_.mmi_i16);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, a_.i16, b_.i16, 0, 4, 1, 5);
#else
r_.i16[0] = a_.i16[0];
r_.i16[1] = b_.i16[0];
r_.i16[2] = a_.i16[1];
r_.i16[3] = b_.i16[1];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpcklwd(a, b) simde_mm_unpacklo_pi16(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_pi16(a, b) simde_mm_unpacklo_pi16(a, b)
# define _m_punpcklwd(a, b) simde_mm_unpacklo_pi16(a, b)
#endif
/* simde_mm_unpacklo_pi32 (MMX PUNPCKLDQ): interleave the lower 32-bit lanes
 * of `a` and `b` -> { a0, b0 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_unpacklo_pi32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_unpacklo_pi32(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i32 = vzip1_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_MIPS_LOONGSON_MMI_NATIVE)
r_.mmi_i32 = punpcklwd_s(a_.mmi_i32, b_.mmi_i32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 8, a_.i32, b_.i32, 0, 2);
#else
r_.i32[0] = a_.i32[0];
r_.i32[1] = b_.i32[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_punpckldq(a, b) simde_mm_unpacklo_pi32(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_pi32(a, b) simde_mm_unpacklo_pi32(a, b)
# define _m_punpckldq(a, b) simde_mm_unpacklo_pi32(a, b)
#endif
/* simde_mm_xor_si64 (MMX PXOR): bitwise XOR of the full 64-bit values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_xor_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _mm_xor_si64(a, b);
#else
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veor_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Lane width is irrelevant for a bitwise op; i32f is the native-fast lane. */
r_.i32f = a_.i32f ^ b_.i32f;
#else
r_.u64[0] = a_.u64[0] ^ b_.u64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pxor(a, b) simde_mm_xor_si64(a, b)
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _mm_xor_si64(a, b) simde_mm_xor_si64(a, b)
# define _m_pxor(a, b) simde_mm_xor_si64(a, b)
#endif
/* simde_m_to_int (MMX MOVD r32, mm): return the low 32 bits of `a` as a
 * signed integer. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_m_to_int (simde__m64 a) {
#if defined(SIMDE_X86_MMX_NATIVE)
return _m_to_int(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wvector-conversion") && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
/* clang < 10 warns about the NEON vector conversion below; silence it. */
#pragma clang diagnostic ignored "-Wvector-conversion"
#endif
return vget_lane_s32(a_.neon_i32, 0);
HEDLEY_DIAGNOSTIC_POP
#else
return a_.i32[0];
#endif
#endif
}
#if defined(SIMDE_X86_MMX_ENABLE_NATIVE_ALIASES)
# define _m_to_int(a) simde_m_to_int(a)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_MMX_H) */
/* :: End x86/mmx.h :: */
#if defined(_WIN32)
#include <windows.h>
#endif
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Private view of a 128-bit SSE register. Every member aliases the same 16
 * bytes, so code can pick whichever lane width (or native vector type) a
 * given implementation branch needs. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
/* GCC/clang vector extensions: subscriptable 16-byte vectors per lane type. */
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
/* Portable fallback: plain arrays with identical layout. */
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
/* View as two MMX-style 64-bit halves. */
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
/* `n` is the native register type on the host ISA, when one exists. */
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_MIPS_MSA_NATIVE)
v16i8 msa_i8;
v8i16 msa_i16;
v4i32 msa_i32;
v2i64 msa_i64;
v16u8 msa_u8;
v8u16 msa_u16;
v4u32 msa_u32;
v2u64 msa_u64;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
/* Select the public storage type for simde__m128: prefer the target's
 * native 128-bit float vector, fall back to a GCC-style subscriptable
 * generic vector, and finally to the private union itself. */
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
/* When native aliases are requested, also expose the type under the
 * name used by <xmmintrin.h>. */
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
/* Sanity checks: both representations must be exactly 16 bytes, and
 * 16-byte aligned where the compiler lets us verify alignment. */
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
  /* Reinterpret the private union as the public simde__m128 type.
   * memcpy (instead of a cast) avoids strict-aliasing violations and
   * is optimized away by every relevant compiler. */
  simde__m128 result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
  /* Inverse of simde__m128_from_private: view the public type through
   * the private union so individual lanes can be accessed.  memcpy
   * keeps this strict-aliasing safe and compiles to nothing. */
  simde__m128_private result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
/* Generate to/from conversion helpers (simde__m128_to_neon_*,
 * simde__m128_from_neon_*, etc.) between simde__m128 and each native
 * vector type available on the target. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
/* GCC bug 95782 miscompiles the macro-generated float conversions on
 * POWER, so spell them out by hand on affected compilers. */
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
/* Rounding-mode constants matching the MXCSR rounding-control field
 * (bits 13-14).  Use the native _MM_ROUND_* values when available so
 * they interoperate with real MXCSR reads/writes. */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
/* SSE4.1 _mm_round_* rounding-control flags.  Reuse the platform's
 * definitions when <smmintrin.h> provided them; otherwise use the
 * standard encodings from the Intel documentation. */
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
/* Convenience combinations mirroring the nint/floor/ceil/trunc/rint/
 * nearbyint libm functions. */
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
/* Native aliases are only installed when the platform headers did not
 * already define _MM_FROUND_* themselves. */
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
/* MXCSR exception *status* flag bits (bits 0-5).  Each constant falls
 * back to the documented MXCSR encoding when the platform headers do
 * not provide the native macro. */
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
/* Union of all six exception status bits. */
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
/* MXCSR exception *mask* bits (bits 7-12); setting a bit suppresses
 * the corresponding exception.  Same fallback scheme as the status
 * flags above. */
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
/* Union of all six exception mask bits. */
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
/* MXCSR flush-to-zero control (bit 15).  ON and the mask share the
 * same value by design: the field is a single bit. */
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
  /* Return the current rounding mode as an MXCSR-style SIMDE_MM_ROUND_*
   * value.  On x86 this reads MXCSR directly; otherwise we translate
   * the C fenv rounding mode, defaulting to round-to-nearest when fenv
   * is unavailable or reports an unrecognized mode. */
#if defined(SIMDE_X86_SSE_NATIVE)
  return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
  unsigned int vfe_mode;
  switch (fegetround()) {
    #if defined(FE_TONEAREST)
      case FE_TONEAREST:
        vfe_mode = SIMDE_MM_ROUND_NEAREST;
        break;
    #endif

    /* Bug fix: FE_TOWARDZERO (truncation) corresponds to
     * _MM_ROUND_TOWARD_ZERO and FE_DOWNWARD (round toward -inf) to
     * _MM_ROUND_DOWN.  The previous code had these two swapped,
     * disagreeing with SIMDE_MM_SET_ROUNDING_MODE's (correct)
     * inverse mapping, so a get-after-set round-trip lied. */
    #if defined(FE_TOWARDZERO)
      case FE_TOWARDZERO:
        vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
        break;
    #endif

    #if defined(FE_UPWARD)
      case FE_UPWARD:
        vfe_mode = SIMDE_MM_ROUND_UP;
        break;
    #endif

    #if defined(FE_DOWNWARD)
      case FE_DOWNWARD:
        vfe_mode = SIMDE_MM_ROUND_DOWN;
        break;
    #endif

    default:
      vfe_mode = SIMDE_MM_ROUND_NEAREST;
      break;
  }
  return vfe_mode;
#else
  return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
/* Set the floating-point rounding mode from an MXCSR-style
 * SIMDE_MM_ROUND_* value.  On x86 this writes MXCSR; elsewhere the
 * request is translated to the equivalent C fenv mode.  Unknown values
 * are ignored (early return), and targets without fenv are a no-op. */
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
  /* Report whether flush-to-zero is enabled.  Only x86 exposes this
   * through MXCSR; other targets have no portable denormal control
   * here, so we report it as off. */
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
  return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
/* Bug fix: this alias previously (re)defined _MM_SET_FLUSH_ZERO_MODE
 * (duplicating the definition that follows the setter below), leaving
 * the getter without any native alias at all. */
#define _MM_GET_FLUSH_ZERO_MODE() SIMDE_MM_GET_FLUSH_ZERO_MODE()
#endif
/* Enable/disable flush-to-zero.  Only meaningful on x86 where MXCSR
 * exists; everywhere else the request is silently ignored. */
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
/* Portable _mm_getcsr: return the MXCSR register on x86.  On other
 * targets only the rounding-mode field can be emulated, so just that
 * portion of the register is reported. */
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
/* Portable _mm_setcsr: write the MXCSR register on x86.  On other
 * targets only the rounding-mode bits are honored; the exception and
 * flush-to-zero fields are silently dropped. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
/* Internal helper implementing _mm_round_ps for non-SSE4.1 targets.
 * `rounding` is a SIMDE_MM_FROUND_* value; `lax_rounding` is accepted
 * for signature compatibility with the _sd/_ss variants but unused
 * here. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
/* NOTE(review): the second condition tests SIMDE_ARM_NEON_A32V8
 * rather than SIMDE_ARM_NEON_A32V8_NATIVE like the rest of this
 * file -- confirm whether that is intentional. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
/* Dispatch on the rounding request (exception-suppression bit
 * stripped); each case prefers a native vector rounding op and falls
 * back to a scalar libm loop.  NOTE(review): the unreachable returns
 * reference simde_mm_undefined_pd (double) inside a __m128 (float)
 * function -- presumably never instantiated; verify. */
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
/* Public entry point: use the real SSE4.1 intrinsic when available. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
/* _mm_set_ps: build a vector from four floats.  Arguments are given
 * most-significant lane first (e3) per the x86 convention, so e0 lands
 * in lane 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON loads in memory order, so the array is written e0-first. */
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
/* _mm_set_ps1 / _mm_set1_ps: broadcast a single float to all four
 * lanes, using the target's native splat where one exists. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
/* The (void) cast silences unused-parameter warnings on compilers
 * that treat vec_splats as a macro. */
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
/* _mm_set1_ps is the same operation under a different name. */
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
/* _mm_move_ss: result lane 0 comes from b, lanes 1-3 come from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
/* Index 4 selects b's lane 0; 1-3 select a's upper lanes. */
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Select b where the mask is all-ones (lane 0 only). */
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) m = { ~0U, 0U, 0U, 0U };
r_.altivec_f32 = vec_sel(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* Byte shuffle: bytes 0-3 from b, bytes 20-31 are a's lanes 1-3. */
r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_broadcastlow_ps(simde__m128 a) {
/* This function broadcasts the first element in the input vector to
* all lanes.  It is used to avoid generating spurious exceptions in
* *_ss functions since there may be garbage in the upper lanes. */
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_shuffle_ps(a, a, 0);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdupq_laneq_f32(a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_splat(a_.altivec_f32, 0);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0)
;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[0];
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* _mm_add_ps: lane-wise single-precision addition. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* GCC-style generic vectors support arithmetic directly. */
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
/* _mm_add_ss: lane 0 = a[0] + b[0], lanes 1-3 copied from a.  The
 * broadcastlow path keeps garbage in the upper lanes from raising
 * spurious FP exceptions when SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Add a vector that is zero everywhere except lane 0 (= b[0]). */
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
/* _mm_and_ps: bitwise AND of the raw 128 bits (operates on the
 * integer view of the lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
/* _mm_andnot_ps: bitwise (~a) & b -- note the FIRST operand is the
 * one complemented, matching the x86 intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vbicq computes arg0 & ~arg1, so the operands are swapped. */
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
/* _mm_xor_ps: bitwise XOR of the raw 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
/* _mm_or_ps: bitwise OR of the raw 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
/* Internal helper: bitwise NOT of all 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* ternarylogic with truth table 0x55 is NOT of the first operand. */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Branchless select: a XOR ((a XOR b) AND mask). */
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* _mm_avg_pu16 (pavgw): rounded average of unsigned 16-bit lanes,
 * (a + b + 1) >> 1, computed without overflow. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
/* Widen to 32 bits so the +1 cannot overflow before the shift. */
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
/* _mm_avg_pu8 (pavgb): rounded average of unsigned 8-bit lanes,
 * (a + b + 1) >> 1, computed without overflow. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) && !defined(SIMDE_BUG_GCC_100761)
/* Widen to 16 bits so the +1 cannot overflow before the shift. */
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
/* Internal helper: lane-wise absolute value.  On x86 this is done by
 * clearing the sign bit with an AND mask (0x7FFFFFFF). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
/* Build the sign-clearing mask via memcpy to avoid type-punning UB. */
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
/* _mm_cmpeq_ps: lane-wise equality compare; each result lane is
 * all-ones if equal, all-zeros otherwise (NaN never compares equal). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Generic-vector comparisons already yield 0 / -1 per lane. */
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
/* _mm_cmpeq_ss: compare lane 0 only; lanes 1-3 are copied from a.
 * The broadcastlow path avoids spurious FP exceptions from garbage in
 * the upper lanes when SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
/* _mm_cmpge_ps: lane-wise a >= b (ordered).  All-ones on true, all-zeros
 * on false; NaN lanes produce zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
/* _mm_cmpge_ss: a[0] >= b[0] as a mask in lane 0; lanes 1-3 copied from a.
 * The native path is skipped on PGI, whose _mm_cmpge_ss is broken. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmpge_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
/* _mm_cmpgt_ps: lane-wise a > b (ordered).  All-ones on true, all-zeros
 * on false; NaN lanes produce zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
/* _mm_cmpgt_ss: a[0] > b[0] as a mask in lane 0; lanes 1-3 copied from a.
 * Native path skipped on PGI (broken intrinsic there). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
/* _mm_cmple_ps: lane-wise a <= b (ordered).  All-ones on true, all-zeros
 * on false; NaN lanes produce zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
/* _mm_cmple_ss: a[0] <= b[0] as a mask in lane 0; lanes 1-3 copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmple_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
/* _mm_cmplt_ps: lane-wise a < b (ordered).  All-ones on true, all-zeros
 * on false; NaN lanes produce zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
/* _mm_cmplt_ss: a[0] < b[0] as a mask in lane 0; lanes 1-3 copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmplt_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
/* _mm_cmpneq_ps: lane-wise a != b (unordered predicate).  All-ones on
 * true, all-zeros on false; NaN lanes compare not-equal, producing
 * all-ones. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no "not equal": compute equal, then bitwise NOT. */
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
/* AltiVec/z/Arch: equal, then complement via vec_nor(x, x). */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
/* _mm_cmpneq_ss: a[0] != b[0] as a mask in lane 0 (NaN yields all-ones);
 * lanes 1-3 copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
/* _mm_cmpnge_ps: lane-wise NOT(a >= b).  Per Intel semantics NGE is an
 * *unordered* predicate: a lane containing NaN yields all-ones.  This is
 * why it cannot simply forward to simde_mm_cmplt_ps, which returns
 * all-zeros for NaN lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpnge_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* NEON lacks unordered predicates: complement the ordered >= . */
      r_.neon_u32 = vmvnq_u32(vcgeq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        /* !(x >= y) is true for unordered operands, unlike (x < y). */
        r_.u32[i] = !(a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
/* _mm_cmpnge_ss: NOT(a[0] >= b[0]) as an all-ones/all-zeros mask in lane 0
 * (NaN operands yield all-ones, per Intel's unordered NGE predicate);
 * lanes 1-3 are copied from a.  Forwarding to simde_mm_cmplt_ss would get
 * the NaN case wrong.  Native path skipped on PGI, matching
 * simde_mm_cmpge_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpnge_ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    /* !(x >= y) is true for unordered operands, unlike (x < y). */
    r_.u32[0] = !(a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
/* _mm_cmpngt_ps: lane-wise NOT(a > b).  NGT is an *unordered* predicate:
 * NaN lanes yield all-ones, so this cannot forward to simde_mm_cmple_ps
 * (which returns all-zeros for NaN lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpngt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vcgtq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        /* !(x > y) is true for unordered operands, unlike (x <= y). */
        r_.u32[i] = !(a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
/* _mm_cmpngt_ss: NOT(a[0] > b[0]) as a mask in lane 0 (NaN operands yield
 * all-ones, per Intel's unordered NGT predicate); lanes 1-3 copied from a.
 * Forwarding to simde_mm_cmple_ss would get the NaN case wrong.  Native
 * path skipped on PGI, matching simde_mm_cmpgt_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpngt_ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    /* !(x > y) is true for unordered operands, unlike (x <= y). */
    r_.u32[0] = !(a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
/* _mm_cmpnle_ps: lane-wise NOT(a <= b).  NLE is an *unordered* predicate:
 * NaN lanes yield all-ones, so this cannot forward to simde_mm_cmpgt_ps
 * (which returns all-zeros for NaN lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpnle_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vcleq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        /* !(x <= y) is true for unordered operands, unlike (x > y). */
        r_.u32[i] = !(a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
/* _mm_cmpnle_ss: NOT(a[0] <= b[0]) as a mask in lane 0 (NaN operands yield
 * all-ones, per Intel's unordered NLE predicate); lanes 1-3 copied from a.
 * Forwarding to simde_mm_cmpgt_ss would get the NaN case wrong. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpnle_ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    /* !(x <= y) is true for unordered operands, unlike (x > y). */
    r_.u32[0] = !(a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
/* _mm_cmpnlt_ps: lane-wise NOT(a < b).  NLT is an *unordered* predicate:
 * NaN lanes yield all-ones, so this cannot forward to simde_mm_cmpge_ps
 * (which returns all-zeros for NaN lanes). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpnlt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vcltq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        /* !(x < y) is true for unordered operands, unlike (x >= y). */
        r_.u32[i] = !(a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
/* _mm_cmpnlt_ss: NOT(a[0] < b[0]) as a mask in lane 0 (NaN operands yield
 * all-ones, per Intel's unordered NLT predicate); lanes 1-3 copied from a.
 * Forwarding to simde_mm_cmpge_ss would get the NaN case wrong. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpnlt_ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    /* !(x < y) is true for unordered operands, unlike (x >= y). */
    r_.u32[0] = !(a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
/* _mm_cmpord_ps: lane-wise "ordered" test — all-ones when neither a[i]
 * nor b[i] is NaN, all-zeros when either is. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* x == x is false only for NaN; AND the two self-compares.  NOTE(review):
 * this passes simde__m128 straight to the wasm intrinsics — presumably
 * simde__m128 is the native v128_t on WASM builds; confirm. */
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON does not have ordered compare builtin
Need to compare a eq a and b eq b to check for NaN
Do AND of results to get final */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
/* No isnanf available on this platform: fail loudly rather than lie. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
/* _mm_cmpunord_ps: lane-wise "unordered" test — all-ones when a[i] or
 * b[i] is NaN, all-zeros otherwise (complement of _mm_cmpord_ps). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* x != x is true only for NaN; OR the two self-compares.  NOTE(review):
 * passes simde__m128 straight to wasm intrinsics — presumably the native
 * v128_t on WASM builds; confirm. */
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* (a==a AND b==b) gives "ordered"; NOT it for "unordered". */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
/* POWER8 has vec_nand, so NOT(ordered) is a single op. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Pre-P8: compute "ordered", then complement with vec_nor(x, x). */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
/* _mm_cmpunord_ss: "unordered" test on lane 0 only (all-ones when a[0] or
 * b[0] is NaN); lanes 1-3 copied from a.  Native path skipped on PGI. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 so upper-lane garbage cannot raise FP exceptions. */
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
/* _mm_comieq_ss: scalar compare of lane 0, returning int 0/1 instead of a
 * mask.  The NEON path ORs in the "either is NaN" case, so it returns 1
 * when unordered.  NOTE(review): the scalar fallback returns 0 for NaN
 * operands (plain C ==), which differs from the NEON path — confirm
 * whether relaxed (SIMDE_FAST_NANS-style) semantics are intended here. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
/* _mm_comige_ss: scalar a[0] >= b[0], returning int 0/1.  The NEON path
 * ANDs with "neither is NaN", so unordered operands return 0 — matching
 * the scalar fallback (plain C >= is false for NaN). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
/* _mm_comigt_ss: scalar a[0] > b[0], returning int 0/1.  Unordered
 * operands return 0 on both the NEON path (masked by "neither is NaN")
 * and the scalar fallback. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
/* _mm_comile_ss: scalar a[0] <= b[0], returning int 0/1.  The NEON path
 * ORs in the "either is NaN" case (unordered returns 1).  NOTE(review):
 * the scalar fallback returns 0 for NaN operands — differs from the NEON
 * path; confirm whether relaxed NaN semantics are intended. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
/* _mm_comilt_ss: scalar a[0] < b[0], returning int 0/1.  The NEON path
 * ORs in the "either is NaN" case (unordered returns 1).  NOTE(review):
 * the scalar fallback returns 0 for NaN operands — differs from the NEON
 * path; confirm whether relaxed NaN semantics are intended. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
/* _mm_comineq_ss: scalar a[0] != b[0], returning int 0/1.  Unordered
 * operands return 0 on the NEON path (masked by "neither is NaN"); the
 * scalar fallback's C != returns 1 for NaN — a divergence to be aware of. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
/* Internal helper: lane-wise copysign — each result lane has the
 * magnitude of dest and the sign bit of src (copysignf semantics). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* -0.0f has only the sign bit set; bit-select sign from src, rest from dest. */
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Some compilers implement vec_cpsgn with the argument order swapped. */
#if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
/* NOTE(review): this condition repeats SIMDE_POWER_ALTIVEC_P6_NATIVE from
 * the branch above, so this branch is only ever taken on z/Arch — confirm
 * that is intended. */
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
/* (dest XOR src) AND signmask isolates differing sign bits; XOR flips
 * dest's sign only where it differs from src's. */
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
/* Internal helper: lane-wise "xorsign" — flips dest's sign wherever src is
 * negative (i.e. result sign = dest sign XOR src sign, magnitude of dest). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
/* _mm_cvt_pi2ps: convert the two int32s in b to floats in lanes 0-1;
 * lanes 2-3 are passed through from a.  (Same operation as
 * simde_mm_cvtpi32_ps below — the two implementations should stay in sync.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
/* _mm_cvt_ps2pi: convert lanes 0-1 of a to int32 using the current
 * rounding direction (hence the explicit round-to-current-direction
 * before truncating conversions). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128) && !defined(SIMDE_BUG_GCC_100761)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
/* nearbyintf rounds per the current FP environment without raising
 * the inexact exception. */
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
/* _mm_cvt_si2ss: convert int32 b to float in lane 0; lanes 1-3 are passed
 * through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
/* _mm_cvt_ss2si: convert lane 0 of a to int32 using the current rounding
 * direction.  Without SIMDE_FAST_CONVERSION_RANGE, out-of-range values
 * and NaN return INT32_MIN, mirroring x86's integer-indefinite result. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
/* Both comparisons are false for NaN, so NaN also yields INT32_MIN. */
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
/* _mm_cvtpi16_ps: convert four signed int16s to four floats (exact —
 * every int16 is representable in binary32). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Sign-extend i16 -> i32, then convert to f32. */
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
/* _mm_cvtpi32_ps: convert the two int32s in b to floats in lanes 0-1;
 * lanes 2-3 passed through from a.  (Identical operation to
 * simde_mm_cvt_pi2ps above — keep the two in sync.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
/* _mm_cvtpi32x2_ps: convert four int32s (two from a in lanes 0-1, two
 * from b in lanes 2-3) to floats. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
/* _mm_cvtpi8_ps: convert the low four signed int8s of a to floats; the
 * upper four bytes of a are ignored. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Sign-extend i8 -> i16 -> i32 (keeping the low half), then convert. */
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
/* _mm_cvtps_pi16: round four floats and narrow to four int16s.
 * NOTE(review): the NEON path uses vrndiq_f32 (round per current FP mode,
 * typically ties-to-even) while the fallback uses roundf (ties away from
 * zero) — results differ for exact .5 values; confirm which is intended. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
/* _mm_cvtps_pi32: round lanes 0-1 of a and convert to two int32s.
 * Without SIMDE_FAST_CONVERSION_RANGE, out-of-range values and NaN
 * produce INT32_MIN (x86 integer-indefinite). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
/* Both comparisons false for NaN, so NaN also maps to INT32_MIN. */
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
/* _mm_cvtps_pi8: convert four floats to four saturated int8s in the low
 * half of the result; the upper four bytes are undefined (both paths
 * leave them unspecified or zero). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
/* Saturate to the int8 range before rounding/converting. */
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
/* _mm_cvtpu16_ps: widen the four unsigned 16-bit lanes of `a` to f32.
 * Every u16 value is exactly representable in f32, so no rounding occurs. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
/* _mm_cvtpu8_ps: convert the low four unsigned 8-bit lanes of `a` to f32
 * (the upper four bytes of `a` are ignored). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* u8 -> u16 (widen), take the low 4 lanes, widen to u32, convert to f32. */
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
/* _mm_cvtsi32_ss: return `a` with lane 0 replaced by the int32 `b` converted
 * to f32; lanes 1-3 are passed through from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
/* _mm_cvtsi64_ss: return `a` with lane 0 replaced by the int64 `b` converted
 * to f32 (large magnitudes lose precision in the i64->f32 conversion); lanes
 * 1-3 pass through. Native path only on AMD64; PGI spells the intrinsic
 * _mm_cvtsi64x_ss. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
/* _mm_cvtss_f32: extract lane 0 of `a` as a scalar f32. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
/* _mm_cvtss_si32 is the Intel-named synonym of _mm_cvt_ss2si: convert lane 0
 * of `a` to int32 with rounding. Delegate to the existing implementation. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
  const int32_t lane0 = simde_mm_cvt_ss2si(a);
  return lane0;
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
/* _mm_cvtss_si64: convert lane 0 of `a` to int64, rounding to nearest via
 * roundf. Native path only on AMD64 (PGI spells it _mm_cvtss_si64x).
 * NOTE(review): unlike the 32-bit variants below, the portable path does no
 * out-of-range clamping before the f32->i64 conversion — presumably relying
 * on inputs being in range; confirm against upstream intent. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
/* _mm_cvtt_ps2pi / _mm_cvttps_pi32: truncating conversion of the low two f32
 * lanes of `a` to two int32s (r_ is an __m64, so the loop covers 2 lanes).
 * Without SIMDE_FAST_CONVERSION_RANGE, out-of-range inputs yield INT32_MIN
 * (0x80000000), matching the x86 "integer indefinite" result. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
/* vcvt truncates toward zero, matching cvtt semantics. */
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
/* _mm_cvtt_ss2si / _mm_cvttss_si32: truncating conversion of lane 0 of `a`
 * to int32. Without SIMDE_FAST_CONVERSION_RANGE, out-of-range inputs yield
 * INT32_MIN (0x80000000), matching x86. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
/* _mm_cvttss_si64: truncating conversion of lane 0 of `a` to int64.
 * Native path only on AMD64 and not under MSVC; PGI spells it
 * _mm_cvttss_si64x. No range clamping is performed in the portable path. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
/* _mm_cmpord_ss: lane 0 becomes all-ones if neither a[0] nor b[0] is NaN
 * ("ordered"), else all-zeros; lanes 1-3 are copied from `a`. The broadcastlow
 * variant avoids spurious FP exceptions from the unused upper lanes when
 * SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
/* No isnanf available: this configuration is unsupported. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
/* _mm_div_ps: lane-wise f32 division a / b. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 NEON has no division: use a reciprocal estimate refined by one
 * Newton-Raphson step (vrecpsq), then multiply. This is an approximation,
 * not an IEEE-exact divide. */
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
/* _mm_div_ss: lane 0 = a[0] / b[0]; lanes 1-3 are copied from `a`. The
 * broadcastlow variant avoids FP exceptions from the unused upper lanes when
 * SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Reuses the full-vector divide, then re-inserts lane 0 into `a`. */
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
/* _mm_extract_pi16: return the imm8-th (0..3) signed 16-bit lane of `a`.
 * The portable function is overridden below by the native/NEON macro forms
 * when those are available. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
  return simde__m64_to_private(a).i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
/* _mm_insert_pi16: return a copy of `a` with its imm8-th (0..3) 16-bit lane
 * replaced by `i`. The portable function is overridden below by the
 * native/NEON macro forms when those are available. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
  simde__m64_private result_ = simde__m64_to_private(a);
  result_.i16[imm8] = i;
  return simde__m64_from_private(result_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
/* _mm_load_ps: load four f32s from 16-byte-aligned memory (the portable path
 * asserts the alignment via SIMDE_ALIGN_ASSUME_LIKE before memcpy). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
/* _mm_load1_ps / _mm_load_ps1: broadcast the single f32 at mem_addr to all
 * four lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
/* _mm_load_ss: load one f32 into lane 0 and zero lanes 1-3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
/* _mm_loadh_pi: load 64 bits (two f32s) from mem_addr into the HIGH half of
 * the result; the low half is copied from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
/* _mm_loadl_pi: load 64 bits (two f32s) from mem_addr into the LOW half of
 * the result; the high half is copied from `a`. The portable path uses
 * memcpy, so unaligned mem_addr is safe (see the comment above about the
 * __m64 argument type). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* _mm_loadr_ps: load four f32s from 16-byte-aligned memory in reverse order
 * (result lane 0 = mem_addr[3], ..., lane 3 = mem_addr[0]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vrev64q swaps within each 64-bit pair; vextq then swaps the pairs,
 * producing a full 4-lane reversal. */
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
/* _mm_loadu_ps: load four f32s from memory with no alignment requirement
 * (memcpy in the portable path). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
/* _mm_maskmove_si64: for each byte i, store a[i] to mem_addr[i] iff the sign
 * bit of mask[i] is set; other bytes of memory are untouched. The native
 * instruction's non-temporal store hint is not emulated. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
/* _mm_max_pi16 / _m_pmaxsw: lane-wise signed 16-bit maximum. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
/* _mm_max_ps: lane-wise f32 maximum. Without SIMDE_FAST_NANS the
 * compare-and-select forms reproduce x86 maxps semantics: when either
 * operand is NaN the result is `b`'s lane (compare false selects b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
/* _mm_max_pu8 / _m_pmaxub: lane-wise unsigned 8-bit maximum. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
/* _mm_max_ss: lane 0 = max(a[0], b[0]); lanes 1-3 are copied from `a`. The
 * broadcastlow variant avoids FP exceptions from the unused upper lanes when
 * SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* BUG FIX: this previously called `maxq_f32`, which is not a NEON intrinsic
 * and broke the ARMv7 build; the correct intrinsic is vmaxq_f32 (compare the
 * parallel vminq_f32 call in simde_mm_min_ss). */
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
/* _mm_min_pi16 / _m_pminsw: lane-wise signed 16-bit minimum. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
/* _mm_min_ps: lane-wise f32 minimum. Without SIMDE_FAST_NANS the
 * compare-and-select forms reproduce x86 minps semantics: when either
 * operand is NaN the result is `b`'s lane (compare false selects b). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vminq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* pmin(b, a) returns a when a < b, else b — argument order is deliberate. */
r_.wasm_v128 = wasm_f32x4_pmin(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Build a lane mask from (a < b) and bit-select between a and b. */
uint32_t SIMDE_VECTOR(16) m = HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32 < b_.f32);
r_.f32 =
HEDLEY_REINTERPRET_CAST(
__typeof__(r_.f32),
( (HEDLEY_REINTERPRET_CAST(__typeof__(m), a_.f32) & m) |
(HEDLEY_REINTERPRET_CAST(__typeof__(m), b_.f32) & ~m)
)
);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
/* _mm_min_pu8 / _m_pminub: lane-wise unsigned 8-bit minimum. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
/* _mm_min_ss: lane 0 = min(a[0], b[0]); lanes 1-3 are copied from `a`. The
 * broadcastlow variant avoids FP exceptions from the unused upper lanes when
 * SIMDE_FAST_EXCEPTIONS is not set. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
/* _mm_movehl_ps: result = { b[2], b[3], a[2], a[3] } — the high halves of
 * `b` (low result half) and `a` (high result half). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
/* _mm_movelh_ps: result = { a[0], a[1], b[0], b[1] } — the low halves of
 * `a` (low result half) and `b` (high result half). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
/* _mm_movemask_pi8 / _m_pmovmskb: build an 8-bit mask from the sign (top)
 * bit of each of the eight bytes of `a`; bit i of the result is the sign bit
 * of byte i. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Isolate each sign bit (mask 0x80), shift byte i's bit into position i
 * (negative shifts in xr are right shifts), then horizontally add. */
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
/* _mm_movemask_ps: build a 4-bit mask from the sign bits of the four f32
 * lanes of `a`; bit i of the result is the sign bit of lane i. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result: byte 0 holds bits 0-1, byte 8 holds bits 2-3.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
/* BUG FIX: a second `#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)` branch (a
 * table-based vaddvq/vpaddlq implementation) followed here but was
 * unreachable — its condition was identical to the branch above, which
 * always takes precedence. The dead branch has been removed; behavior is
 * unchanged on every platform. */
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx));
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx);
return HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
/* _mm_mul_ps: lane-wise f32 multiplication a * b. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
/* simde_mm_mul_ss: multiply only the low lanes (r[0] = a[0] * b[0]);
 * lanes 1-3 are copied from a.  When SIMDE_FAST_EXCEPTIONS is off, the
 * operands' low lane is broadcast first so the unused upper lanes cannot
 * raise spurious floating-point exceptions in the full-width multiply. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_mul_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_mul_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.f32[0] = a_.f32[0] * b_.f32[0];
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
/* simde_mm_mulhi_pu16: for each unsigned 16-bit lane, compute the high
 * 16 bits of the full 32-bit product a[i] * b[i] (PMULHUW). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_mulhi_pu16(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Widening multiply to 32 bits, shift the products right by 16,
       * then narrow back down to 16-bit lanes. */
      const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
      const uint32x4_t t2 = vshrq_n_u32(t1, 16);
      const uint16x4_t t3 = vmovn_u32(t2);
      r_.neon_u16 = t3;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
/* Prefetch locality hints mirroring <xmmintrin.h>'s _MM_HINT_* values:
 * 0-3 are the read hints (NTA, T0, T1, T2) and 4-7 the corresponding
 * "exclusive" (prefetch-for-write) variants.  When GCC provides the real
 * intrinsic the constants must have type 'enum _mm_hint' to match its
 * prototype, hence the casts; otherwise plain integers are used. */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
/* BUG FIX: this alias was previously misspelled _MM_HINT_ETNA, which
 * left _MM_HINT_ENTA undefined after the #undef above. */
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
/* BUG FIX: _MM_HINT_ET1 was #undef'd twice here; it is _MM_HINT_ET2
 * that must be removed before being redefined below. */
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
/* simde_mm_prefetch: portable _mm_prefetch().
 *
 * p: address to prefetch (may be invalid; prefetch is only a hint).
 * i: one of the SIMDE_MM_HINT_* constants.  NTA/T0/T1/T2 are read
 *    prefetches with decreasing temporal locality; ENTA/ET0/ET1/ET2 are
 *    the corresponding prefetch-for-write hints.
 *
 * On compilers with no supported prefetch primitive this is a no-op. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (const void* p, int i) {
  #if \
      HEDLEY_HAS_BUILTIN(__builtin_prefetch) || \
      HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
      HEDLEY_INTEL_VERSION_CHECK(13,0,0)
    /* __builtin_prefetch(addr, rw, locality): rw 0 = read, 1 = write;
     * locality ranges from 0 (no temporal locality) to 3 (high). */
    switch(i) {
      case SIMDE_MM_HINT_NTA:
        __builtin_prefetch(p, 0, 0);
        break;
      case SIMDE_MM_HINT_T0:
        __builtin_prefetch(p, 0, 3);
        break;
      case SIMDE_MM_HINT_T1:
        __builtin_prefetch(p, 0, 2);
        break;
      case SIMDE_MM_HINT_T2:
        __builtin_prefetch(p, 0, 1);
        break;
      case SIMDE_MM_HINT_ENTA:
        __builtin_prefetch(p, 1, 0);
        break;
      case SIMDE_MM_HINT_ET0:
        __builtin_prefetch(p, 1, 3);
        break;
      case SIMDE_MM_HINT_ET1:
        __builtin_prefetch(p, 1, 2);
        break;
      case SIMDE_MM_HINT_ET2:
        /* BUG FIX: ET2 is a prefetch-for-write hint, but the rw argument
         * was 0 (read) — inconsistent with ENTA/ET0/ET1 above. */
        __builtin_prefetch(p, 1, 1);
        break;
    }
  #elif defined(__ARM_ACLE)
    #if (__ARM_ACLE >= 101)
      switch(i) {
        case SIMDE_MM_HINT_NTA:
          __pldx(0, 0, 1, p);
          break;
        case SIMDE_MM_HINT_T0:
          __pldx(0, 0, 0, p);
          break;
        case SIMDE_MM_HINT_T1:
          __pldx(0, 1, 0, p);
          break;
        case SIMDE_MM_HINT_T2:
          __pldx(0, 2, 0, p);
          break;
        case SIMDE_MM_HINT_ENTA:
          __pldx(1, 0, 1, p);
          break;
        case SIMDE_MM_HINT_ET0:
          __pldx(1, 0, 0, p);
          break;
        case SIMDE_MM_HINT_ET1:
          __pldx(1, 1, 0, p);
          break;
        case SIMDE_MM_HINT_ET2:
          __pldx(1, 2, 0, p);
          break;
      }
    #else
      (void) i;
      /* BUG FIX: statement was missing its terminating semicolon. */
      __pld(p);
    #endif
  #elif HEDLEY_PGI_VERSION_CHECK(10,0,0)
    (void) i;
    #pragma mem prefetch p
  #elif HEDLEY_CRAY_VERSION_CHECK(8,1,0)
    switch (i) {
      case SIMDE_MM_HINT_NTA:
        #pragma _CRI prefetch (nt) p
        break;
      case SIMDE_MM_HINT_T0:
      case SIMDE_MM_HINT_T1:
      case SIMDE_MM_HINT_T2:
        #pragma _CRI prefetch p
        break;
      case SIMDE_MM_HINT_ENTA:
        #pragma _CRI prefetch (write, nt) p
        break;
      case SIMDE_MM_HINT_ET0:
      case SIMDE_MM_HINT_ET1:
      case SIMDE_MM_HINT_ET2:
        #pragma _CRI prefetch (write) p
        break;
    }
  #elif HEDLEY_IBM_VERSION_CHECK(11,0,0)
    switch(i) {
      case SIMDE_MM_HINT_NTA:
        __prefetch_by_load(p, 0, 0);
        break;
      case SIMDE_MM_HINT_T0:
        __prefetch_by_load(p, 0, 3);
        break;
      case SIMDE_MM_HINT_T1:
        __prefetch_by_load(p, 0, 2);
        break;
      case SIMDE_MM_HINT_T2:
        __prefetch_by_load(p, 0, 1);
        break;
      case SIMDE_MM_HINT_ENTA:
        __prefetch_by_load(p, 1, 0);
        break;
      case SIMDE_MM_HINT_ET0:
        __prefetch_by_load(p, 1, 3);
        break;
      case SIMDE_MM_HINT_ET1:
        __prefetch_by_load(p, 1, 2);
        break;
      case SIMDE_MM_HINT_ET2:
        /* BUG FIX: write hint, consistent with ENTA/ET0/ET1 above
         * (second argument was 0). */
        __prefetch_by_load(p, 1, 1);
        break;
    }
  #endif
}
#if defined(SIMDE_X86_SSE_NATIVE)
  /* Old clang rejects a const-qualified pointer here; suppress the
   * cast-qual diagnostic around the native call. */
  #if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
    #define simde_mm_prefetch(p, i) \
      (__extension__({ \
        HEDLEY_DIAGNOSTIC_PUSH \
        HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
        _mm_prefetch((p), (i)); \
        HEDLEY_DIAGNOSTIC_POP \
      }))
  #else
    #define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
  #endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
/* simde_x_mm_negate_ps: SIMDe-internal helper — lane-wise negation of a
 * 4 x f32 vector.  The native path flips only the sign bits by XORing
 * with -0.0f. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vnegq_f32(a_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
      r_.altivec_f32 = vec_neg(a_.altivec_f32);
    #elif defined(SIMDE_VECTOR_NEGATE)
      r_.f32 = -a_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = -a_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
/* simde_mm_rcp_ps: lane-wise approximate reciprocal (1/a[i]).  Like the
 * hardware RCPPS instruction, the result is an approximation; the NEON
 * path refines the initial estimate with SIMDE_ACCURACY_PREFERENCE
 * Newton-Raphson steps, and the IEEE754 path uses a bit-trick initial
 * guess followed by one refinement step. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_rcp_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      float32x4_t recip = vrecpeq_f32(a_.neon_f32);

      #if SIMDE_ACCURACY_PREFERENCE > 0
        for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
          recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
        }
      #endif

      r_.neon_f32 = recip;
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_re(a_.altivec_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.f32 = 1.0f / a_.f32;
    #elif defined(SIMDE_IEEE754_STORAGE)
      /* Bit-level reciprocal estimate; 0x7EF311C3 is the magic constant
       * from the reference below, refined by one Newton step.
       * https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        int32_t ix;
        simde_float32 fx = a_.f32[i];
        simde_memcpy(&ix, &fx, sizeof(ix));
        int32_t x = INT32_C(0x7EF311C3) - ix;
        simde_float32 temp;
        simde_memcpy(&temp, &x, sizeof(temp));
        r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
      }
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = 1.0f / a_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
/* simde_mm_rcp_ss: approximate reciprocal of the low lane only; lanes
 * 1-3 are copied from a.  Broadcasting the low lane (non-fast-exceptions
 * path) keeps the unused upper lanes from raising FP exceptions. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_rcp_ss(a);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_rcp_ps(simde_x_mm_broadcastlow_ps(a)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    r_.f32[0] = 1.0f / a_.f32[0];
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
/* simde_mm_rsqrt_ps: lane-wise approximate reciprocal square root
 * (1/sqrt(a[i])).  The IEEE754 path uses the classic "fast inverse
 * square root" bit trick; the magic constant and the number of
 * Newton-Raphson refinement steps are chosen by SIMDE_ACCURACY_PREFERENCE. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_rsqrt_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
    #elif defined(SIMDE_IEEE754_STORAGE)
      /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
        Pages 100 - 103 */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        #if SIMDE_ACCURACY_PREFERENCE <= 0
          /* Raw bit-trick estimate, no refinement. */
          r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
        #else
          simde_float32 x = a_.f32[i];
          simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
          int32_t ix;

          simde_memcpy(&ix, &x, sizeof(ix));

          #if SIMDE_ACCURACY_PREFERENCE == 1
            ix = INT32_C(0x5F375A82) - (ix >> 1);
          #else
            ix = INT32_C(0x5F37599E) - (ix >> 1);
          #endif

          simde_memcpy(&x, &ix, sizeof(x));

          /* One Newton-Raphson step always runs below; preference >= 2
           * adds a second step here. */
          #if SIMDE_ACCURACY_PREFERENCE >= 2
            x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
          #endif
          x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);

          r_.f32[i] = x;
        #endif
      }
    #elif defined(simde_math_sqrtf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
/* simde_mm_rsqrt_ss: approximate reciprocal square root of the low lane
 * only; lanes 1-3 are copied from a.  Uses the same bit-trick /
 * Newton-Raphson scheme as simde_mm_rsqrt_ps, applied to lane 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_rsqrt_ss(a);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_rsqrt_ps(simde_x_mm_broadcastlow_ps(a)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
    #elif defined(SIMDE_IEEE754_STORAGE)
      {
        #if SIMDE_ACCURACY_PREFERENCE <= 0
          r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
        #else
          simde_float32 x = a_.f32[0];
          simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
          int32_t ix;

          simde_memcpy(&ix, &x, sizeof(ix));

          #if SIMDE_ACCURACY_PREFERENCE == 1
            ix = INT32_C(0x5F375A82) - (ix >> 1);
          #else
            ix = INT32_C(0x5F37599E) - (ix >> 1);
          #endif

          simde_memcpy(&x, &ix, sizeof(x));

          /* One Newton-Raphson step always runs; preference >= 2 adds a
           * second step. */
          #if SIMDE_ACCURACY_PREFERENCE >= 2
            x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
          #endif
          x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);

          r_.f32[0] = x;
        #endif
      }
      r_.f32[1] = a_.f32[1];
      r_.f32[2] = a_.f32[2];
      r_.f32[3] = a_.f32[3];
    #elif defined(simde_math_sqrtf)
      r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
      r_.f32[1] = a_.f32[1];
      r_.f32[2] = a_.f32[2];
      r_.f32[3] = a_.f32[3];
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
/* simde_mm_sad_pu8 (PSADBW, MMX form): sum of absolute differences of
 * the eight unsigned bytes of a and b.  The 16-bit sum is stored in
 * lane 0 of the result; lanes 1-3 are zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_sad_pu8(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Absolute byte differences, then pairwise-add up to a single
       * 64-bit total. */
      uint64x1_t t = vpaddl_u32(vpaddl_u16(vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8))));
      /* BUG FIX: vset_lane_u16 takes a uint16_t lane value; the cast was
       * previously to uint64_t (a narrowing conversion at the call).
       * The sum of eight byte differences is at most 8 * 255 = 2040, so
       * the value always fits in 16 bits. */
      r_.neon_u16 = vset_lane_u16(HEDLEY_STATIC_CAST(uint16_t, vget_lane_u64(t, 0)), vdup_n_u16(0), 0);
    #else
      uint16_t sum = 0;

      SIMDE_VECTORIZE_REDUCTION(+:sum)
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        sum += HEDLEY_STATIC_CAST(uint8_t, simde_math_abs(a_.u8[i] - b_.u8[i]));
      }

      r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
      r_.i16[1] = 0;
      r_.i16[2] = 0;
      r_.i16[3] = 0;
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
/* simde_mm_set_ss: build a vector with a in lane 0 and 0.0f in lanes 1-3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_set_ss(a);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
  #else
    return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
/* simde_mm_setr_ps: like simde_mm_set_ps but with the arguments in
 * reverse (memory) order — e3 goes into lane 0, e0 into lane 3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_setr_ps(e3, e2, e1, e0);
  #else
    return simde_mm_set_ps(e0, e1, e2, e3);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
/* simde_mm_setzero_ps: all four lanes set to 0.0f.  The portable
 * fallback zeroes the raw bytes, which is +0.0f under IEEE 754. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_setzero_ps();
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    return vec_splats(SIMDE_FLOAT32_C(0.0));
  #else
    simde__m128 r;
    simde_memset(&r, 0, sizeof(r));
    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
/* simde_mm_undefined_ps: returns a vector with unspecified contents.
 * When the compiler supports _mm_undefined_ps the value is left truly
 * undefined; otherwise it is zeroed unless uninitialized-variable
 * diagnostics are being suppressed, in which case r_ is deliberately
 * left uninitialized (hence the surrounding diagnostic push/pop). */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
  simde__m128_private r_;

  #if defined(SIMDE_HAVE_UNDEFINED128)
    r_.n = _mm_undefined_ps();
  #elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
    r_ = simde__m128_to_private(simde_mm_setzero_ps());
  #endif

  return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
/* simde_x_mm_setone_ps: SIMDe-internal helper returning a vector with
 * every bit set.  A lane always compares equal to itself (zero is not
 * NaN), and a true comparison lane is all ones. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
  const simde__m128 zero_ = simde_mm_setzero_ps();
  return simde_mm_cmpeq_ps(zero_, zero_);
}
/* simde_mm_sfence: store fence.  Non-x86 fallbacks issue a full
 * sequentially-consistent memory fence (stronger than SFENCE, which
 * only orders stores, but still correct). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
  /* TODO: Use Hedley. */
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_sfence();
  #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
  #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
    #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
      __atomic_thread_fence(__ATOMIC_SEQ_CST);
    #else
      atomic_thread_fence(memory_order_seq_cst);
    #endif
  #elif defined(_MSC_VER)
    MemoryBarrier();
  #elif HEDLEY_HAS_EXTENSION(c_atomic)
    __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
    __sync_synchronize();
  #elif defined(_OPENMP)
    /* Last resort: an empty named critical section acts as a barrier. */
    #pragma omp critical(simde_mm_sfence_)
    { }
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
/* SIMDE_MM_SHUFFLE packs four 2-bit lane selectors into an 8-bit
 * immediate, z being the selector for the highest lane. */
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif

/* simde_mm_shuffle_pi16 (PSHUFW): permute the four 16-bit lanes of a
 * according to the 2-bit fields of imm8.  Implemented as a macro where
 * possible so imm8 stays a compile-time constant. */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
      const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
      simde__m64_from_private((simde__m64_private) { .i16 = \
        SIMDE_SHUFFLE_VECTOR_(16, 8, \
          (simde__tmp_a_).i16, \
          (simde__tmp_a_).i16, \
          (((imm8)     ) & 3), \
          (((imm8) >> 2) & 3), \
          (((imm8) >> 4) & 3), \
          (((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  simde__m64_private r_;
  simde__m64_private a_ = simde__m64_to_private(a);

  for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
    r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
  }

HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#  pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
  return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
/* simde_mm_shuffle_ps (SHUFPS): the low two result lanes are selected
 * from a and the high two from b; each 2-bit field of imm8 picks the
 * source lane.  The function below is the portable fallback; faster
 * macro implementations are substituted when available so imm8 remains
 * a compile-time constant. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
  r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
  r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
  r_.f32[3] = b_.f32[(imm8 >> 6) & 3];

  return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  /* Indices 4-7 select lanes from the second vector. */
  #define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
      simde__m128_from_private((simde__m128_private) { .f32 = \
        SIMDE_SHUFFLE_VECTOR_(32, 16, \
          simde__m128_to_private(a).f32, \
          simde__m128_to_private(b).f32, \
          (((imm8)     ) & 3), \
          (((imm8) >> 2) & 3), \
          (((imm8) >> 4) & 3) + 4, \
          (((imm8) >> 6) & 3) + 4) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
  /* BUG FIX: a and b are simde__m128, so the float32x4_t view must come
   * from simde__m128_to_neon_f32 — this previously used the
   * simde__m128i conversion helper, a type mismatch. */
  #define simde_mm_shuffle_ps(a, b, imm8) \
    (__extension__({ \
      float32x4_t simde_mm_shuffle_ps_a_ = simde__m128_to_neon_f32(a); \
      float32x4_t simde_mm_shuffle_ps_b_ = simde__m128_to_neon_f32(b); \
      float32x4_t simde_mm_shuffle_ps_r_; \
      \
      simde_mm_shuffle_ps_r_ = vmovq_n_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, (imm8) & (0x3))); \
      simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_ps_r_, 1); \
      simde_mm_shuffle_ps_r_ = vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_ps_r_, 2); \
      vsetq_lane_f32(vgetq_lane_f32(simde_mm_shuffle_ps_b_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_ps_r_, 3); \
    }))
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
/* simde_mm_sqrt_ps: lane-wise single-precision square root.  ARMv7 has
 * no full-precision vector sqrt, so the reciprocal-sqrt estimate is
 * refined with Newton-Raphson steps (count controlled by
 * SIMDE_ACCURACY_PREFERENCE) and multiplied back by a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sqrt_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
    #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      float32x4_t est = vrsqrteq_f32(a_.neon_f32);
      for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
        est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
      }
      r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
      r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
    #elif defined(simde_math_sqrtf)
      /* BUG FIX: this guard previously tested simde_math_sqrt (the
       * double-precision symbol) even though the loop calls
       * simde_math_sqrtf; sibling functions (e.g. simde_mm_sqrt_ss)
       * guard on simde_math_sqrtf. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
        r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
/* simde_mm_sqrt_ss: square root of the low lane only; lanes 1-3 are
 * copied from a.  Broadcasting the low lane (non-fast-exceptions path)
 * keeps the unused upper lanes from raising FP exceptions. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sqrt_ss(a);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_sqrt_ps(simde_x_mm_broadcastlow_ps(a)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      float32_t value =
        vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
      r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
    #elif defined(simde_math_sqrtf)
      r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
      r_.f32[1] = a_.f32[1];
      r_.f32[2] = a_.f32[2];
      r_.f32[3] = a_.f32[3];
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
/* simde_mm_store_ps: store the four lanes of a to mem_addr.
 * NOTE(review): like _mm_store_ps, the AltiVec vec_st path requires a
 * 16-byte-aligned address — callers must honour the alignment contract. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_store_ps(mem_addr, a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      vst1q_f32(mem_addr, a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      vec_st(a_.altivec_f32, 0, mem_addr);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      wasm_v128_store(mem_addr, a_.wasm_v128);
    #else
      simde_memcpy(mem_addr, &a_, sizeof(a));
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_store1_ps: broadcast lane 0 of a into all four floats at
 * mem_addr.  SIMDE_ALIGN_ASSUME_LIKE tells the compiler the pointer has
 * simde__m128 (16-byte) alignment, matching _mm_store1_ps's contract. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
  simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);

  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_store_ps1(mem_addr_, a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      simde__m128_private tmp_;
      tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
      simde_mm_store_ps(mem_addr_, tmp_.f32);
    #else
      SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
      for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
        mem_addr_[i] = a_.f32[0];
      }
    #endif
  #endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_store_ss: store only lane 0 of a to *mem_addr; no alignment
 * requirement beyond that of a float. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_store_ss(mem_addr, a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
    #else
      *mem_addr = a_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_storeh_pi: store the high two floats of a (lanes 2-3) into
 * the 64-bit location at mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
    #else
      /* m64[1] is the upper half of the 128-bit value. */
      simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
/* simde_mm_storel_pi: store the low two floats of a (lanes 0-1) into
 * the 64-bit location at mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
  #else
    simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      dest_->neon_f32 = vget_low_f32(a_.neon_f32);
    #else
      dest_->f32[0] = a_.f32[0];
      dest_->f32[1] = a_.f32[1];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
/* simde_mm_storer_ps: store the four lanes of a to mem_addr in reverse
 * order (lane 3 first). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_storer_ps(mem_addr, a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
    #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Reverse within each 64-bit half, then swap the halves:
       * [0 1 2 3] -> [1 0 3 2] -> [3 2 1 0]. */
      float32x4_t tmp = vrev64q_f32(a_.neon_f32);
      vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
    #elif defined(SIMDE_SHUFFLE_VECTOR_)
      a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
      simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
    #else
      SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
      for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
        mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
      }
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_storeu_ps: unaligned store of the four lanes of a to
 * mem_addr (no alignment requirement). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    _mm_storeu_ps(mem_addr, a);
  #else
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      vst1q_f32(mem_addr, a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
      vec_vsx_st(a_.altivec_f32, 0, mem_addr);
    #else
      simde_memcpy(mem_addr, &a_, sizeof(a_));
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* simde_mm_sub_ps: lane-wise single-precision subtraction
 * (r[i] = a[i] - b[i]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sub_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f32 = a_.f32 - b_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = a_.f32[i] - b_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
/* simde_mm_sub_ss: subtract only the low lanes (r[0] = a[0] - b[0]);
 * lanes 1-3 are copied from a.  Broadcasting the low lanes
 * (non-fast-exceptions path) avoids spurious FP exceptions from the
 * unused upper lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sub_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
    return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_sub_ps(simde_x_mm_broadcastlow_ps(a), simde_x_mm_broadcastlow_ps(b)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.f32[0] = a_.f32[0] - b_.f32[0];
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
/* simde_mm_ucomieq_ss (UCOMISS + sete): compare the low lanes for
 * equality without raising exceptions on quiet NaN.  The NEON branch
 * ORs an "either operand is NaN" mask into the result, so NaN compares
 * as equal (matching UCOMISS, where unordered sets ZF).
 * NOTE(review): the scalar fallbacks use a plain ==, which yields 0 for
 * NaN operands — confirm whether that divergence matters to callers.
 * The fenv code saves and restores the FP environment so the comparison
 * does not leave exception flags set. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_ucomieq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);
    int r;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
      r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
    #elif defined(SIMDE_HAVE_FENV_H)
      fenv_t envp;
      int x = feholdexcept(&envp);
      r = a_.f32[0] == b_.f32[0];
      if (HEDLEY_LIKELY(x == 0))
        fesetenv(&envp);
    #else
      r = a_.f32[0] == b_.f32[0];
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
/* simde_mm_ucomige_ss: non-signalling compare a[0] >= b[0].  The NEON
 * branch ANDs with a "both operands are not NaN" mask, so any NaN
 * yields 0.  The fenv code preserves the FP exception state around the
 * comparison. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_ucomige_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);
    int r;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
      r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
    #elif defined(SIMDE_HAVE_FENV_H)
      fenv_t envp;
      int x = feholdexcept(&envp);
      r = a_.f32[0] >= b_.f32[0];
      if (HEDLEY_LIKELY(x == 0))
        fesetenv(&envp);
    #else
      r = a_.f32[0] >= b_.f32[0];
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
/* simde_mm_ucomigt_ss: non-signalling compare a[0] > b[0].  Any NaN
 * operand yields 0 on the NEON path (AND with not-NaN mask).  The fenv
 * code preserves the FP exception state around the comparison. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_ucomigt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);
    int r;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
      r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
    #elif defined(SIMDE_HAVE_FENV_H)
      fenv_t envp;
      int x = feholdexcept(&envp);
      r = a_.f32[0] > b_.f32[0];
      if (HEDLEY_LIKELY(x == 0))
        fesetenv(&envp);
    #else
      r = a_.f32[0] > b_.f32[0];
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
/* simde_mm_ucomile_ss: non-signalling compare a[0] <= b[0].  The NEON
 * branch ORs in an "either operand is NaN" mask, so NaN yields 1
 * (matching UCOMISS, where unordered sets ZF/CF).
 * NOTE(review): the scalar fallbacks use a plain <=, which yields 0 for
 * NaN — confirm whether that divergence matters to callers. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_ucomile_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);
    int r;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
      r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
    #elif defined(SIMDE_HAVE_FENV_H)
      fenv_t envp;
      int x = feholdexcept(&envp);
      r = a_.f32[0] <= b_.f32[0];
      if (HEDLEY_LIKELY(x == 0))
        fesetenv(&envp);
    #else
      r = a_.f32[0] <= b_.f32[0];
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* Unordered compare of the lowest f32 lanes: returns 1 when a[0] < b[0].
 * As with the le variant, the NEON path ORs with an "either is NaN" mask,
 * so NaN operands yield 1. */
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
/* Set when either lane is NaN. */
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Save/restore the FP environment so exception flags raised by the
 * scalar compare are discarded (quiet-compare semantics). */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* Unordered compare of the lowest f32 lanes: returns 1 when a[0] != b[0]
 * and both are non-NaN (the NEON path ANDs with the not-NaN mask, so a
 * NaN operand yields 0). */
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
/* != implemented as NOT(==). */
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Save/restore the FP environment so exception flags raised by the
 * scalar compare are discarded (quiet-compare semantics). */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
/* Detect whether the native toolchain provides __builtin_ia32_undef128;
 * compilers without __has_builtin are assumed to have it unless they are
 * PGI, a GCC affected by the rev-208793 bug, or MSVC. */
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
/* Silence "may be used uninitialized" diagnostics for the r_ locals used
 * by the functions below; popped later by HEDLEY_DIAGNOSTIC_POP. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
/* Interleave the upper two f32 lanes of a and b:
 * result = { a[2], b[2], a[3], b[3] } (see the scalar fallback). */
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 has no vzip2q: zip the two high halves and recombine. */
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
/* Interleave the lower two f32 lanes of a and b:
 * result = { a[0], b[0], a[1], b[1] } (see the scalar fallback).
 *
 * Branch order matches simde_mm_unpackhi_ps: the dedicated ARMv7 NEON
 * path is tried before the generic SIMDE_SHUFFLE_VECTOR_ path, so ARMv7
 * builds with shuffle support still use vzip (previously the NEON branch
 * was unreachable on such builds; the results are identical either way). */
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* ARMv7 has no vzip1q: zip the two low halves and recombine. */
    float32x2_t a1 = vget_low_f32(a_.neon_f32);
    float32x2_t b1 = vget_low_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
  #else
    r_.f32[0] = a_.f32[0];
    r_.f32[1] = b_.f32[0];
    r_.f32[2] = a_.f32[1];
    r_.f32[3] = b_.f32[1];
  #endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* Store the 64-bit value a to *mem_addr. On x86 this uses the native
 * non-temporal (cache-bypassing) store; the fallbacks are plain stores. */
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* Store the four f32 lanes of a to mem_addr. Uses the native non-temporal
 * store where available, Clang/GCC's __builtin_nontemporal_store as a
 * second choice, and an ordinary aligned store otherwise. */
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
/* 4x4 matrix transpose of rows row0..row3 (each a simde__m128), modifying
 * the rows in place. The ARMv7-only variant uses vtrn + combine; the
 * portable variant mirrors Intel's _MM_TRANSPOSE4_PS macro using
 * unpacklo/unpackhi plus movelh/movehl. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t SIMDE_MM_TRANSPOSE4_PS_ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_low_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[0]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW01.val[1]), \
vget_high_f32(SIMDE_MM_TRANSPOSE4_PS_ROW23.val[1])); \
} while (0)
#else
/* Portable variant: verbose temporary names avoid collisions with the
 * caller's identifiers since macro arguments are expanded in place. */
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp0; \
SIMDE_MM_TRANSPOSE4_PS_tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
SIMDE_MM_TRANSPOSE4_PS_tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
SIMDE_MM_TRANSPOSE4_PS_tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp0, SIMDE_MM_TRANSPOSE4_PS_tmp2); \
row1 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp2, SIMDE_MM_TRANSPOSE4_PS_tmp0); \
row2 = simde_mm_movelh_ps(SIMDE_MM_TRANSPOSE4_PS_tmp1, SIMDE_MM_TRANSPOSE4_PS_tmp3); \
row3 = simde_mm_movehl_ps(SIMDE_MM_TRANSPOSE4_PS_tmp3, SIMDE_MM_TRANSPOSE4_PS_tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
/* End of the SSE section of this amalgamated header. */
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
/* :: End x86/sse.h :: */
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
/* Implementation-side view of a 128-bit integer vector (__m128i): a union
 * of per-element typed arrays (or GCC-style vector types when available)
 * overlaid with each platform's native 128-bit vector types, so each code
 * path can access the same bits through the member it needs. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
/* Plain-array fallback when GCC-style vector extensions are unavailable. */
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 simde_float64 f64[2];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
/* Native per-platform vector members; exactly one group is active. */
#if defined(SIMDE_X86_SSE2_NATIVE)
SIMDE_ALIGN_TO_16 __m128i n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
#if defined(__ARM_FP16_FORMAT_IEEE)
SIMDE_ALIGN_TO_16 float16x8_t neon_f16;
#endif
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_MIPS_MSA_NATIVE)
v16i8 msa_i8;
v8i16 msa_i16;
v4i32 msa_i32;
v2i64 msa_i64;
v16u8 msa_u8;
v8u16 msa_u16;
v4u32 msa_u32;
v2u64 msa_u64;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
#if defined(__UINT_FAST32_TYPE__) && (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE))
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f;
#else
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f;
#endif
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
#if defined(__UINT_FAST32_TYPE__) && (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE))
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f;
#else
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f;
#endif
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128i_private;
/* Implementation-side view of a 128-bit double-precision vector (__m128d);
 * same layout strategy as simde__m128i_private: typed element arrays (or
 * vector-extension members) overlaid with native platform vector types. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
/* Plain-array fallback when GCC-style vector extensions are unavailable. */
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 simde_float64 f64[2];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
/* Native per-platform vector members; exactly one group is active. */
#if defined(SIMDE_X86_SSE2_NATIVE)
SIMDE_ALIGN_TO_16 __m128d n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_MIPS_MSA_NATIVE)
v16i8 msa_i8;
v8i16 msa_i16;
v4i32 msa_i32;
v2i64 msa_i64;
v16u8 msa_u8;
v8u16 msa_u16;
v4u32 msa_u32;
v2u64 msa_u64;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
#if defined(__INT_FAST32_TYPE__) && (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE))
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f;
#else
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f;
#endif
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
#if defined(__UINT_FAST32_TYPE__) && (defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE))
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f;
#else
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f;
#endif
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128d_private;
/* Public simde__m128i / simde__m128d types: alias the native vector type
 * when one exists, a GCC-style vector otherwise, and fall back to the
 * private union as a last resort. All must be exactly 16 bytes. */
#if defined(SIMDE_X86_SSE2_NATIVE)
typedef __m128i simde__m128i;
typedef __m128d simde__m128d;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef int64x2_t simde__m128i;
# if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
typedef float64x2_t simde__m128d;
# elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float64 simde__m128d SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
# else
typedef simde__m128d_private simde__m128d;
# endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128i;
typedef v128_t simde__m128d;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128i;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(double) simde__m128d;
#else
typedef simde__m128d_private simde__m128d;
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef int64_t simde__m128i SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
typedef simde_float64 simde__m128d SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128i_private simde__m128i;
typedef simde__m128d_private simde__m128d;
#endif
/* When native aliases are requested, expose the Intel names directly. */
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
typedef simde__m128i __m128i;
typedef simde__m128d __m128d;
#endif
/* Layout invariants: the public and private types must agree in size (and,
 * when checkable, in 16-byte alignment) so memcpy-based conversion works. */
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128i), "simde__m128i size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128i_private), "simde__m128i_private size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128d), "simde__m128d size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128d_private), "simde__m128d_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128i) == 16, "simde__m128i is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128i_private) == 16, "simde__m128i_private is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128d) == 16, "simde__m128d is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128d_private) == 16, "simde__m128d_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde__m128i_from_private(simde__m128i_private v) {
  /* Reinterpret the private union as the public type; memcpy is the
   * portable way to type-pun (sizes asserted equal above). */
  simde__m128i result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i_private
simde__m128i_to_private(simde__m128i v) {
  /* Reinterpret the public type as the private union via memcpy
   * (portable type-punning; sizes asserted equal above). */
  simde__m128i_private result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde__m128d_from_private(simde__m128d_private v) {
  /* Reinterpret the private union as the public type via memcpy
   * (portable type-punning; sizes asserted equal above). */
  simde__m128d result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d_private
simde__m128d_to_private(simde__m128d v) {
  /* Reinterpret the public type as the private union via memcpy
   * (portable type-punning; sizes asserted equal above). */
  simde__m128d_private result;
  simde_memcpy(&result, &v, sizeof(result));
  return result;
}
/* Generate to/from conversion helpers between simde__m128i / simde__m128d
 * and each platform's native vector types (e.g. simde__m128i_to_neon_i8,
 * simde__m128i_from_neon_i8, ...). */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, float64x2_t, neon, f64)
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
/* Same set of conversions for the double-precision type. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, float64x2_t, neon, f64)
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
/* Hand-written f64 conversions to work around GCC bug 95782; otherwise
 * generated by the macro like the rest. */
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(double)
simde__m128d_to_altivec_f64(simde__m128d value) {
simde__m128d_private r_ = simde__m128d_to_private(value);
return r_.altivec_f64;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde__m128d_from_altivec_f64(SIMDE_POWER_ALTIVEC_VECTOR(double) value) {
simde__m128d_private r_;
r_.altivec_f64 = value;
return simde__m128d_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, SIMDE_POWER_ALTIVEC_VECTOR(double), altivec, f64)
#endif
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, v128_t, wasm, v128);
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, v128_t, wasm, v128);
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* Build a vector from two doubles; e0 goes in lane 0 (low), e1 in lane 1
 * (high), matching Intel's _mm_set_pd argument order. */
simde_mm_set_pd (simde_float64 e1, simde_float64 e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set_pd(e1, e0);
#else
simde__m128d_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_make(e0, e1);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* vld1q_f64 loads in memory order, so data[0] = e0 becomes lane 0. */
SIMDE_ALIGN_TO_16 simde_float64 data[2] = { e0, e1 };
r_.neon_f64 = vld1q_f64(data);
#else
r_.f64[0] = e0;
r_.f64[1] = e1;
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_pd(e1, e0) simde_mm_set_pd(e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* Broadcast the double a to both lanes of the result.
 * Fix: the scalar fallback loop bound previously iterated over
 * sizeof(r_.i64) while writing r_.f64 — the counts happen to match (both
 * are 2 elements), but the bound now uses the array actually written. */
simde_mm_set1_pd (simde_float64 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
  return _mm_set1_pd(a);
#else
  simde__m128d_private r_;
  #if defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f64x2_splat(a);
  #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f64 = vdupq_n_f64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
    r_.altivec_f64 = vec_splats(HEDLEY_STATIC_CAST(double, a));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = a;
    }
  #endif
  return simde__m128d_from_private(r_);
#endif
}
/* _mm_set_pd1 is a deprecated alias for _mm_set1_pd. */
#define simde_mm_set_pd1(a) simde_mm_set1_pd(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_pd(a) simde_mm_set1_pd(a)
#define _mm_set_pd1(a) simde_mm_set1_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* SIMDe extension: lane-wise absolute value of a double vector. The SSE2
 * path clears the sign bit by AND-ing with a 0x7FF... mask (built via
 * memcpy to avoid type-punning UB). */
simde_x_mm_abs_pd(simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
simde_float64 mask_;
uint64_t u64_ = UINT64_C(0x7FFFFFFFFFFFFFFF);
simde_memcpy(&mask_, &u64_, sizeof(u64_));
return _mm_and_pd(_mm_set1_pd(mask_), a);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vabsq_f64(a_.neon_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = vec_abs(a_.altivec_f64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_fabs(a_.f64[i]);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* SIMDe extension: bitwise NOT of all 128 bits of a (operates on the raw
 * bit pattern, not on the doubles as values). */
simde_x_mm_not_pd(simde__m128d a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
/* Ternary-logic immediate 0x55 computes ~a in one instruction. */
__m128i ai = _mm_castpd_si128(a);
return _mm_castsi128_pd(_mm_ternarylogic_epi64(ai, ai, ai, 0x55));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f64 = vec_nor(a_.altivec_f64, a_.altivec_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = ~a_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = ~(a_.i32f[i]);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* SIMDe extension: per-lane select — returns b where mask is all-ones,
 * a where mask is all-zeros. See the contract note below. */
simde_x_mm_select_pd(simde__m128d a, simde__m128d b, simde__m128d mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_pd, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_pd(a, b, mask);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b),
mask_ = simde__m128d_to_private(mask);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Branchless select: a ^ ((a ^ b) & mask). */
r_.i64 = a_.i64 ^ ((a_.i64 ^ b_.i64) & mask_.i64);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vbslq_s64(mask_.neon_u64, b_.neon_i64, a_.neon_i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[i] ^ ((a_.i64[i] ^ b_.i64[i]) & mask_.i64[i]);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise addition of sixteen int8 lanes (wrapping on overflow, per
 * _mm_add_epi8 semantics on the native path). */
simde_mm_add_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_add_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vaddq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i8 = vec_add(a_.altivec_i8, b_.altivec_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = a_.i8 + b_.i8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = a_.i8[i] + b_.i8[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi8(a, b) simde_mm_add_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise addition of eight int16 lanes. */
simde_mm_add_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_add_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vaddq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i16 = vec_add(a_.altivec_i16, b_.altivec_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = a_.i16 + b_.i16;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] + b_.i16[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi16(a, b) simde_mm_add_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise addition of four int32 lanes. */
simde_mm_add_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_add_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vaddq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_add(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 + b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] + b_.i32[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi32(a, b) simde_mm_add_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise addition of two int64 lanes. Note the AltiVec branch needs
 * POWER8 (P8) — 64-bit vec_add is not available on P6/P7. */
simde_mm_add_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_add_epi64(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vaddq_s64(a_.neon_i64, b_.neon_i64);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_i64 = vec_add(a_.altivec_i64, b_.altivec_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = a_.i64 + b_.i64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[i] + b_.i64[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi64(a, b) simde_mm_add_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* Lane-wise addition of two f64 lanes.
 * Fix: removed a duplicated, unreachable
 * `#elif defined(SIMDE_WASM_SIMD128_NATIVE)` branch that repeated the
 * WASM implementation after the AltiVec branch (dead code; behavior
 * unchanged). */
simde_mm_add_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
  return _mm_add_pd(a, b);
#else
  simde__m128d_private
    r_,
    a_ = simde__m128d_to_private(a),
    b_ = simde__m128d_to_private(b);
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f64 = vaddq_f64(a_.neon_f64, b_.neon_f64);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f64x2_add(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    r_.altivec_f64 = vec_add(a_.altivec_f64, b_.altivec_f64);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.f64 = a_.f64 + b_.f64;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = a_.f64[i] + b_.f64[i];
    }
  #endif
  return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_pd(a, b) simde_mm_add_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* result = { b[0], a[1] }: take the low lane from b and the high lane
 * from a (see the scalar fallback). */
simde_mm_move_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_move_sd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(b_.neon_f64, 0), a_.neon_f64, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
/* IBM XL and GCC/Clang disagree on vec_xxpermdi operand order. */
#if defined(HEDLEY_IBM_VERSION)
r_.altivec_f64 = vec_xxpermdi(a_.altivec_f64, b_.altivec_f64, 1);
#else
r_.altivec_f64 = vec_xxpermdi(b_.altivec_f64, a_.altivec_f64, 1);
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 2, 1);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 2, 1);
#else
r_.f64[0] = b_.f64[0];
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_move_sd(a, b) simde_mm_move_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_broadcastlow_pd(simde__m128d a) {
/* This function broadcasts the first element in the input vector to
 * all lanes. It is used to avoid generating spurious exceptions in
 * *_sd functions since there may be garbage in the upper lanes. */
#if defined(SIMDE_X86_SSE2_NATIVE)
/* 0x44 = 0b01'00'01'00: copies 32-bit lanes {0,1} (the low double)
 * into both 64-bit halves. */
return _mm_castsi128_pd(_mm_shuffle_epi32(_mm_castpd_si128(a), 0x44));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vdupq_laneq_f64(a_.neon_f64, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f64 = vec_splat(a_.altivec_f64, 0);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 0, 0);
#else
/* Scalar fallback: copy lane 0 into every lane. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f64[0];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
/* simde_mm_add_sd: portable _mm_add_sd — adds only lane 0 of a and b;
 * lane 1 of the result is copied from a (see the scalar fallback). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_add_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_add_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_add_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so garbage in the upper lanes cannot raise
 * spurious FP exceptions during the full-width add (see
 * simde_x_mm_broadcastlow_pd). */
return simde_mm_move_sd(a, simde_mm_add_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.f64[0] = a_.f64[0] + b_.f64[0];
r_.f64[1] = a_.f64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_sd(a, b) simde_mm_add_sd(a, b)
#endif
/* simde_mm_add_si64: portable _mm_add_si64 — 64-bit addition of two
 * single-lane __m64 values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_add_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_add_si64(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vadd_s64(a_.neon_i64, b_.neon_i64);
#else
/* A __m64 holds a single 64-bit lane, so no loop is needed. */
r_.i64[0] = a_.i64[0] + b_.i64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_si64(a, b) simde_mm_add_si64(a, b)
#endif
/* simde_mm_adds_epi8: portable _mm_adds_epi8 — signed saturating add of
 * sixteen 8-bit lanes (results clamp to [INT8_MIN, INT8_MAX]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_adds_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_adds_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vqaddq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_add_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i8 = vec_adds(a_.altivec_i8, b_.altivec_i8);
#else
/* Scalar fallback: simde_math_adds_i8 performs the saturating add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = simde_math_adds_i8(a_.i8[i], b_.i8[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epi8(a, b) simde_mm_adds_epi8(a, b)
#endif
/* simde_mm_adds_epi16: portable _mm_adds_epi16 — signed saturating add of
 * eight 16-bit lanes (results clamp to [INT16_MIN, INT16_MAX]). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_adds_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_adds_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_add_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i16 = vec_adds(a_.altivec_i16, b_.altivec_i16);
#else
/* Scalar fallback: simde_math_adds_i16 performs the saturating add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = simde_math_adds_i16(a_.i16[i], b_.i16[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epi16(a, b) simde_mm_adds_epi16(a, b)
#endif
/* simde_mm_adds_epu8: portable _mm_adds_epu8 — unsigned saturating add of
 * sixteen 8-bit lanes (results clamp to UINT8_MAX). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_adds_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_adds_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vqaddq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_add_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_u8 = vec_adds(a_.altivec_u8, b_.altivec_u8);
#else
/* Scalar fallback: simde_math_adds_u8 performs the saturating add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = simde_math_adds_u8(a_.u8[i], b_.u8[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epu8(a, b) simde_mm_adds_epu8(a, b)
#endif
/* simde_mm_adds_epu16: portable _mm_adds_epu16 — unsigned saturating add
 * of eight 16-bit lanes (results clamp to UINT16_MAX). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_adds_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_adds_epu16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vqaddq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u16x8_add_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u16 = vec_adds(a_.altivec_u16, b_.altivec_u16);
#else
/* Scalar fallback: simde_math_adds_u16 performs the saturating add. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = simde_math_adds_u16(a_.u16[i], b_.u16[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epu16(a, b) simde_mm_adds_epu16(a, b)
#endif
/* simde_mm_and_pd: portable _mm_and_pd — bitwise AND of the raw 128-bit
 * contents; the double interpretation is irrelevant, so integer views of
 * the data are used on most platforms. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_and_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_and_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Bitwise op on the i32 view — only the bit pattern matters. */
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f64 = vec_and(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_and_pd(a, b) simde_mm_and_pd(a, b)
#endif
/* simde_mm_and_si128: portable _mm_and_si128 — bitwise AND of two 128-bit
 * integer vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_and_si128 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_and_si128(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Operand order is swapped vs. the other branches; AND is commutative. */
r_.neon_i32 = vandq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u32f = vec_and(a_.altivec_u32f, b_.altivec_u32f);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_and_si128(a, b) simde_mm_and_si128(a, b)
#endif
/* simde_mm_andnot_pd: portable _mm_andnot_pd — computes (~a) & b on the
 * raw bits (note: the FIRST operand is the complemented one; see the
 * scalar fallback). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_andnot_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_andnot_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vbicq computes first & ~second, so operands are reversed to get ~a & b. */
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
/* wasm andnot likewise complements its second operand — hence (b, a). */
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = vec_andc(b_.altivec_f64, a_.altivec_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32f = vec_andc(b_.altivec_i32f, a_.altivec_i32f);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = ~a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = ~a_.u64[i] & b_.u64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_andnot_pd(a, b) simde_mm_andnot_pd(a, b)
#endif
/* simde_mm_andnot_si128: portable _mm_andnot_si128 — computes (~a) & b
 * (the first operand is the complemented one; see the scalar fallback). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_andnot_si128 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_andnot_si128(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vbicq computes first & ~second, so operands are reversed to get ~a & b. */
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_andc(b_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = ~a_.i32f & b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = ~(a_.i32f[i]) & b_.i32f[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_andnot_si128(a, b) simde_mm_andnot_si128(a, b)
#endif
/* simde_mm_xor_pd: portable _mm_xor_pd — bitwise XOR of the raw 128-bit
 * contents (commonly used for sign-bit manipulation on doubles). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_xor_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_xor_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = veorq_s64(a_.neon_i64, b_.neon_i64);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] ^ b_.i32f[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_xor_pd(a, b) simde_mm_xor_pd(a, b)
#endif
/* simde_mm_avg_epu8: portable _mm_avg_epu8 — rounded unsigned average of
 * sixteen 8-bit lanes: (a + b + 1) >> 1. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_avg_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_avg_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhaddq_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_avgr(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u8 = vec_avg(a_.altivec_u8, b_.altivec_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen to 16 bits so a + b + 1 cannot overflow before the shift. */
uint16_t wa SIMDE_VECTOR(32);
uint16_t wb SIMDE_VECTOR(32);
uint16_t wr SIMDE_VECTOR(32);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
/* Scalar fallback: integer promotion widens the operands, so the sum
 * is computed without 8-bit overflow. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_avg_epu8(a, b) simde_mm_avg_epu8(a, b)
#endif
/* simde_mm_avg_epu16: portable _mm_avg_epu16 — rounded unsigned average
 * of eight 16-bit lanes: (a + b + 1) >> 1. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_avg_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_avg_epu16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhaddq_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u16x8_avgr(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u16 = vec_avg(a_.altivec_u16, b_.altivec_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Widen to 32 bits so a + b + 1 cannot overflow before the shift. */
uint32_t wa SIMDE_VECTOR(32);
uint32_t wb SIMDE_VECTOR(32);
uint32_t wr SIMDE_VECTOR(32);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
/* Scalar fallback: integer promotion avoids 16-bit overflow. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_avg_epu16(a, b) simde_mm_avg_epu16(a, b)
#endif
/* simde_mm_setzero_si128: portable _mm_setzero_si128 — returns a 128-bit
 * integer vector with every bit cleared. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_setzero_si128 (void) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setzero_si128();
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vdupq_n_s32(0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_splat(INT32_C(0));
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
r_.i32 = __extension__ (__typeof__(r_.i32)) { 0, 0, 0, 0 };
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = 0;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setzero_si128() (simde_mm_setzero_si128())
#endif
/* simde_mm_bslli_si128: portable _mm_bslli_si128 / _mm_slli_si128 —
 * shifts the whole 128-bit lane LEFT by imm8 BYTES, filling with zeros.
 * Any imm8 outside 0..15 yields an all-zero result. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_bslli_si128 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
/* imm8 & ~15 is nonzero exactly when imm8 is outside 0..15. */
if (HEDLEY_UNLIKELY((imm8 & ~15))) {
return simde_mm_setzero_si128();
}
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_ENDIAN_ORDER)
/* On big-endian POWER the byte order within the register is reversed,
 * so a logical left byte-shift maps to the opposite-direction octet
 * shift instruction. */
r_.altivec_i8 =
#if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
vec_slo
#else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */
vec_sro
#endif
(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm8 * 8)));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* NOTE(review): z/Arch is big-endian, which presumably explains using
 * the right-shift-by-bits form here for a logical left shift — confirm
 * against the z/Vector documentation. */
r_.altivec_i8 = vec_srb(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, (imm8 & 15) << 3)));
#elif defined(SIMDE_HAVE_INT128_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
r_.u128[0] = a_.u128[0] << (imm8 * 8);
#else
/* Generic fallback: start from zero, then copy bytes up by imm8. */
r_ = simde__m128i_to_private(simde_mm_setzero_si128());
for (int i = imm8 ; i < HEDLEY_STATIC_CAST(int, sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = a_.i8[i - imm8];
}
#endif
return simde__m128i_from_private(r_);
}
/* Macro forms below let imm8 remain a compile-time constant where the
 * target intrinsics require one. */
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_bslli_si128(a, imm8) _mm_slli_si128(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__)
#define simde_mm_bslli_si128(a, imm8) \
simde__m128i_from_neon_i8(((imm8) <= 0) ? simde__m128i_to_neon_i8(a) : (((imm8) > 15) ? (vdupq_n_s8(0)) : (vextq_s8(vdupq_n_s8(0), simde__m128i_to_neon_i8(a), 16 - (imm8)))))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && !defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
#define simde_mm_bslli_si128(a, imm8) (__extension__ ({ \
const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \
const simde__m128i_private simde__tmp_z_ = simde__m128i_to_private(simde_mm_setzero_si128()); \
simde__m128i_private simde__tmp_r_; \
if (HEDLEY_UNLIKELY(imm8 > 15)) { \
simde__tmp_r_ = simde__m128i_to_private(simde_mm_setzero_si128()); \
} else { \
simde__tmp_r_.i8 = \
SIMDE_SHUFFLE_VECTOR_(8, 16, \
simde__tmp_z_.i8, \
(simde__tmp_a_).i8, \
HEDLEY_STATIC_CAST(int8_t, (16 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (17 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (18 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (19 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (20 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (21 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (22 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (23 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (24 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (25 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (26 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (27 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (28 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (29 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (30 - imm8) & 31), \
HEDLEY_STATIC_CAST(int8_t, (31 - imm8) & 31)); \
} \
simde__m128i_from_private(simde__tmp_r_); }))
#endif
#define simde_mm_slli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_bslli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#define _mm_slli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#endif
/* simde_mm_bsrli_si128: portable _mm_bsrli_si128 / _mm_srli_si128 —
 * shifts the whole 128-bit lane RIGHT by imm8 BYTES, filling with zeros.
 * Any imm8 outside 0..15 yields an all-zero result. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_bsrli_si128 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
/* imm8 & ~15 is nonzero exactly when imm8 is outside 0..15. */
if (HEDLEY_UNLIKELY((imm8 & ~15))) {
return simde_mm_setzero_si128();
}
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_ENDIAN_ORDER)
/* Mirror of bslli: big-endian POWER needs the opposite-direction
 * octet shift instruction. */
r_.altivec_i8 =
#if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
vec_sro
#else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */
vec_slo
#endif
(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm8 * 8)));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* NOTE(review): mirrors the z/Arch branch in bslli (shift direction
 * reversed, presumably for endianness) — confirm against z/Vector docs. */
r_.altivec_i8 = vec_slb(a_.altivec_i8, vec_splats(HEDLEY_STATIC_CAST(unsigned char, (imm8 & 15) << 3)));
#else
/* Scalar fallback: byte i of the result comes from byte i + imm8 of
 * the input, or zero once the source index runs past the end. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
const int e = HEDLEY_STATIC_CAST(int, i) + imm8;
r_.i8[i] = (e < 16) ? a_.i8[e] : 0;
}
#endif
return simde__m128i_from_private(r_);
}
/* Macro forms below let imm8 remain a compile-time constant where the
 * target intrinsics require one. */
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_bsrli_si128(a, imm8) _mm_srli_si128(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__)
#define simde_mm_bsrli_si128(a, imm8) \
simde__m128i_from_neon_i8(((imm8 < 0) || (imm8 > 15)) ? vdupq_n_s8(0) : (vextq_s8(simde__m128i_to_private(a).neon_i8, vdupq_n_s8(0), ((imm8 & 15) != 0) ? imm8 : (imm8 & 15))))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && !defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
#define simde_mm_bsrli_si128(a, imm8) (__extension__ ({ \
const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \
const simde__m128i_private simde__tmp_z_ = simde__m128i_to_private(simde_mm_setzero_si128()); \
simde__m128i_private simde__tmp_r_ = simde__m128i_to_private(a); \
if (HEDLEY_UNLIKELY(imm8 > 15)) { \
simde__tmp_r_ = simde__m128i_to_private(simde_mm_setzero_si128()); \
} else { \
simde__tmp_r_.i8 = \
SIMDE_SHUFFLE_VECTOR_(8, 16, \
simde__tmp_z_.i8, \
(simde__tmp_a_).i8, \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 16) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 17) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 18) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 19) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 20) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 21) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 22) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 23) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 24) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 25) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 26) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 27) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 28) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 29) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 30) & 31), \
HEDLEY_STATIC_CAST(int8_t, (imm8 + 31) & 31)); \
} \
simde__m128i_from_private(simde__tmp_r_); }))
#endif
#define simde_mm_srli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_bsrli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#define _mm_srli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#endif
/* simde_mm_clflush: portable _mm_clflush — flushes the cache line
 * containing p on x86; a no-op elsewhere (there is no portable
 * equivalent, and correctness does not depend on the flush).
 * Fix: the native-alias macro previously took TWO parameters and called
 * simde_mm_clflush() with no argument — any caller using the alias as
 * _mm_clflush(ptr) failed to compile, and the pointer was discarded.
 * The alias now forwards its single pointer argument. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_clflush (void const* p) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_clflush(p);
#else
(void) p;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_clflush(p) simde_mm_clflush(p)
#endif
/* simde_mm_comieq_sd: portable _mm_comieq_sd — compares lane 0 of the two
 * vectors for equality and returns the outcome as 0 or 1. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comieq_sd(a, b);
#else
simde__m128d_private
lhs_ = simde__m128d_to_private(a),
rhs_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* The lane is all-ones on equality; !! collapses that to 0/1. */
return !!vgetq_lane_u64(vceqq_f64(lhs_.neon_f64, rhs_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(lhs_.wasm_v128, 0) == wasm_f64x2_extract_lane(rhs_.wasm_v128, 0);
#else
const int lanes_equal = (lhs_.f64[0] == rhs_.f64[0]);
return lanes_equal;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comieq_sd(a, b) simde_mm_comieq_sd(a, b)
#endif
/* simde_mm_comige_sd: portable _mm_comige_sd — returns 1 if lane 0 of a
 * is greater than or equal to lane 0 of b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comige_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* All-ones lane when true; !! collapses it to 0/1. */
return !!vgetq_lane_u64(vcgeq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >= wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
return a_.f64[0] >= b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comige_sd(a, b) simde_mm_comige_sd(a, b)
#endif
/* simde_mm_comigt_sd: portable _mm_comigt_sd — returns 1 if lane 0 of a
 * is strictly greater than lane 0 of b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comigt_sd(a, b);
#else
simde__m128d_private
lhs_ = simde__m128d_to_private(a),
rhs_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* The lane is all-ones when true; !! collapses that to 0/1. */
return !!vgetq_lane_u64(vcgtq_f64(lhs_.neon_f64, rhs_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(lhs_.wasm_v128, 0) > wasm_f64x2_extract_lane(rhs_.wasm_v128, 0);
#else
const int lhs_greater = (lhs_.f64[0] > rhs_.f64[0]);
return lhs_greater;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comigt_sd(a, b) simde_mm_comigt_sd(a, b)
#endif
/* simde_mm_comile_sd: portable _mm_comile_sd — returns 1 if lane 0 of a
 * is less than or equal to lane 0 of b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comile_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* All-ones lane when true; !! collapses it to 0/1. */
return !!vgetq_lane_u64(vcleq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <= wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
return a_.f64[0] <= b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comile_sd(a, b) simde_mm_comile_sd(a, b)
#endif
/* simde_mm_comilt_sd: portable _mm_comilt_sd — returns 1 if lane 0 of a
 * is strictly less than lane 0 of b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comilt_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* All-ones lane when true; !! collapses it to 0/1. */
return !!vgetq_lane_u64(vcltq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) < wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
return a_.f64[0] < b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comilt_sd(a, b) simde_mm_comilt_sd(a, b)
#endif
/* simde_mm_comineq_sd: portable _mm_comineq_sd — returns 1 if lane 0 of a
 * differs from lane 0 of b, else 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_comineq_sd(a, b);
#else
simde__m128d_private
lhs_ = simde__m128d_to_private(a),
rhs_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Equality lane is all-ones; a single ! inverts it into 0/1 for "not equal". */
return !vgetq_lane_u64(vceqq_f64(lhs_.neon_f64, rhs_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(lhs_.wasm_v128, 0) != wasm_f64x2_extract_lane(rhs_.wasm_v128, 0);
#else
const int lanes_differ = (lhs_.f64[0] != rhs_.f64[0]);
return lanes_differ;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comineq_sd(a, b) simde_mm_comineq_sd(a, b)
#endif
/* simde_x_mm_copysign_pd: SIMDe-internal helper — per-lane copysign:
 * each result lane has the magnitude of dest and the sign of src. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_copysign_pd(simde__m128d dest, simde__m128d src) {
simde__m128d_private
r_,
dest_ = simde__m128d_to_private(dest),
src_ = simde__m128d_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Sign-bit mask built from -0.0 (only the top bit of each lane set). */
uint64x2_t sign_pos = vreinterpretq_u64_f64(vdupq_n_f64(-SIMDE_FLOAT64_C(0.0)));
#else
/* No f64 vector reinterpret on A32V7: extract -0.0's bit pattern via
 * memcpy, then splat it as a u64 mask. */
simde_float64 dbl_nz = -SIMDE_FLOAT64_C(0.0);
uint64_t u64_nz;
simde_memcpy(&u64_nz, &dbl_nz, sizeof(u64_nz));
uint64x2_t sign_pos = vdupq_n_u64(u64_nz);
#endif
/* Bit-select: take the sign bit from src, all other bits from dest. */
r_.neon_u64 = vbslq_u64(sign_pos, src_.neon_u64, dest_.neon_u64);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
#if defined(SIMDE_BUG_VEC_CPSGN_REVERSED_ARGS)
/* Some compilers implement vec_cpsgn with reversed operands. */
r_.altivec_f64 = vec_cpsgn(dest_.altivec_f64, src_.altivec_f64);
#else
r_.altivec_f64 = vec_cpsgn(src_.altivec_f64, dest_.altivec_f64);
#endif
#elif defined(simde_math_copysign)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_copysign(dest_.f64[i], src_.f64[i]);
}
#else
/* Pure bitwise fallback: (sign of src) | (magnitude of dest).
 * Note this branch returns directly, so the from_private below is
 * only reached by the branches that fill in r_. */
simde__m128d sgnbit = simde_mm_set1_pd(-SIMDE_FLOAT64_C(0.0));
return simde_mm_xor_pd(simde_mm_and_pd(sgnbit, src), simde_mm_andnot_pd(sgnbit, dest));
#endif
return simde__m128d_from_private(r_);
}
/* simde_x_mm_xorsign_pd: SIMDe-internal helper — flips each lane of dest
 * wherever the corresponding lane of src is negative, i.e.
 * dest ^ (src & -0.0), which multiplies dest's sign by src's sign. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_xorsign_pd(simde__m128d dest, simde__m128d src) {
simde__m128d sign_bits = simde_mm_and_pd(simde_mm_set1_pd(-0.0), src);
return simde_mm_xor_pd(sign_bits, dest);
}
/* simde_mm_castpd_ps: portable _mm_castpd_ps — reinterprets the 128 bits
 * of a double vector as a float vector; no conversion is performed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_castpd_ps (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castpd_ps(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f32_f64(a);
#else
/* Generic bit-cast: copy the raw bytes into the destination type. */
simde__m128 dst;
simde_memcpy(&dst, &a, sizeof(a));
return dst;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castpd_ps(a) simde_mm_castpd_ps(a)
#endif
/* simde_mm_castpd_si128: portable _mm_castpd_si128 — reinterprets the
 * 128 bits of a double vector as an integer vector; no conversion. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_castpd_si128 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castpd_si128(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_s64_f64(a);
#else
/* Generic bit-cast via byte copy. */
simde__m128i r;
simde_memcpy(&r, &a, sizeof(a));
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castpd_si128(a) simde_mm_castpd_si128(a)
#endif
/* simde_mm_castps_pd: portable _mm_castps_pd — reinterprets the 128 bits
 * of a float vector as a double vector; no conversion. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_castps_pd (simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castps_pd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_f32(a);
#else
/* Generic bit-cast via byte copy. */
simde__m128d r;
simde_memcpy(&r, &a, sizeof(a));
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castps_pd(a) simde_mm_castps_pd(a)
#endif
/* simde_mm_castps_si128: portable _mm_castps_si128 — reinterprets the
 * 128 bits of a float vector as an integer vector; no conversion. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_castps_si128 (simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castps_si128(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128i_from_neon_i32(simde__m128_to_private(a).neon_i32);
#else
/* Generic bit-cast via byte copy. */
simde__m128i r;
simde_memcpy(&r, &a, sizeof(a));
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castps_si128(a) simde_mm_castps_si128(a)
#endif
/* simde_mm_castsi128_pd: portable _mm_castsi128_pd — reinterprets the
 * 128 bits of an integer vector as a double vector; no conversion. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_castsi128_pd (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castsi128_pd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vreinterpretq_f64_s64(a);
#else
/* Generic bit-cast: copy the raw bytes into the destination type. */
simde__m128d dst;
simde_memcpy(&dst, &a, sizeof(a));
return dst;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castsi128_pd(a) simde_mm_castsi128_pd(a)
#endif
/* simde_mm_castsi128_ps: portable _mm_castsi128_ps — reinterprets the
 * 128 bits of an integer vector as a float vector; no conversion. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_castsi128_ps (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_castsi128_ps(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_i32(simde__m128i_to_private(a).neon_i32);
#else
/* Generic bit-cast via byte copy. */
simde__m128 r;
simde_memcpy(&r, &a, sizeof(a));
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castsi128_ps(a) simde_mm_castsi128_ps(a)
#endif
/* simde_mm_cmpeq_epi8: portable _mm_cmpeq_epi8 — per-lane 8-bit equality;
 * each result lane is all-ones (0xFF) when equal, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpeq_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpeq_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Operand order swapped vs. other branches; equality is commutative. */
r_.neon_u8 = vceqq_s8(b_.neon_i8, a_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpeq(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
/* Vector-extension comparison already yields all-ones/zero lanes. */
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 == b_.i8));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] == b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi8(a, b) simde_mm_cmpeq_epi8(a, b)
#endif
/* simde_mm_cmpeq_epi16: portable _mm_cmpeq_epi16 — per-lane 16-bit
 * equality; each result lane is all-ones (0xFFFF) when equal, else zero.
 * Fix: the SIMDE_VECTOR_SUBSCRIPT_OPS branch now wraps the comparison in
 * HEDLEY_REINTERPRET_CAST like the epi8/epi32 siblings do — the vector
 * comparison's result type is not necessarily the destination type. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpeq_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpeq_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Operand order swapped vs. other branches; equality is commutative. */
r_.neon_u16 = vceqq_s16(b_.neon_i16, a_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmpeq(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (a_.i16 == b_.i16));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi16(a, b) simde_mm_cmpeq_epi16(a, b)
#endif
/* simde_mm_cmpeq_epi32: portable _mm_cmpeq_epi32 — per-lane 32-bit
 * equality; each result lane is all-ones when equal, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpeq_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpeq_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Operand order swapped vs. other branches; equality is commutative. */
r_.neon_u32 = vceqq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpeq(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 == b_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] == b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi32(a, b) simde_mm_cmpeq_epi32(a, b)
#endif
/* simde_mm_cmpeq_pd: portable _mm_cmpeq_pd — per-lane double-precision
 * equality; each 64-bit result lane is all-ones when the doubles compare
 * equal, zero otherwise (NaN lanes compare unequal).
 * Fix: removed a MIPS MSA branch that called __msa_addv_w — an integer
 * ADD, not a comparison — which produced garbage results on MSA builds.
 * Those builds now fall through to the correct generic paths. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpeq_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpeq_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vceqq_f64(b_.neon_f64, a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpeq(a_.altivec_f64, b_.altivec_f64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64));
#else
/* Scalar fallback: compare the double VALUES, store the mask as u64. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_pd(a, b) simde_mm_cmpeq_pd(a, b)
#endif
/* simde_mm_cmpeq_sd: portable _mm_cmpeq_sd — ordered equality of the low
 * double; the upper lane is copied from a.
 *
 * Fix: the scalar fallback compared the raw u64 bit patterns
 * (a_.u64[0] == b_.u64[0]), which disagrees with IEEE == in two ways:
 * identical NaN bit patterns compared equal (must be unequal) and
 * -0.0 vs +0.0 compared unequal (must be equal). It now compares the
 * f64 values, matching the other *_sd compares in this file
 * (simde_mm_cmpneq_sd, simde_mm_cmplt_sd, simde_mm_cmple_sd, ...). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpeq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpeq_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpeq_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lane first so the full-width compare cannot raise
 * exceptions from garbage in the upper lane. */
return simde_mm_move_sd(a, simde_mm_cmpeq_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_sd(a, b) simde_mm_cmpeq_sd(a, b)
#endif
/* simde_mm_cmpneq_pd: portable _mm_cmpneq_pd — per-lane double-precision
 * inequality; all-ones where a != b (including when either lane is NaN). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpneq_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpneq_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* NEON has no f64 "not equal": compute equal, then bitwise NOT. */
r_.neon_u32 = vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(b_.neon_f64, a_.neon_f64)))
;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_pd(a, b) simde_mm_cmpneq_pd(a, b)
#endif
/* simde_mm_cmpneq_sd: portable _mm_cmpneq_sd — inequality of the low double;
 * the upper lane is copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpneq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpneq_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpneq_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lane so the full-width compare cannot fault on the
 * (irrelevant) upper lane. */
return simde_mm_move_sd(a, simde_mm_cmpneq_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_sd(a, b) simde_mm_cmpneq_sd(a, b)
#endif
/* simde_mm_cmplt_epi8: portable _mm_cmplt_epi8 — signed 8-bit lane-wise
 * less-than; all-ones where a < b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmplt_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmplt_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcltq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char),vec_cmplt(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 < b_.i8));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] < b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi8(a, b) simde_mm_cmplt_epi8(a, b)
#endif
/* simde_mm_cmplt_epi16: portable _mm_cmplt_epi16 — signed 16-bit lane-wise
 * less-than; all-ones where a < b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmplt_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmplt_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcltq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmplt(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (a_.i16 < b_.i16));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi16(a, b) simde_mm_cmplt_epi16(a, b)
#endif
/* simde_mm_cmplt_epi32: portable _mm_cmplt_epi32 — signed 32-bit lane-wise
 * less-than; all-ones where a < b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmplt_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmplt_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmplt(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.i32 < b_.i32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] < b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi32(a, b) simde_mm_cmplt_epi32(a, b)
#endif
/* simde_mm_cmplt_pd: portable _mm_cmplt_pd — per-lane ordered double
 * less-than; all-ones where a < b (NaN lanes yield zero). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmplt_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmplt_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcltq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmplt(a_.altivec_f64, b_.altivec_f64));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_pd(a, b) simde_mm_cmplt_pd(a, b)
#endif
/* simde_mm_cmplt_sd: portable _mm_cmplt_sd — ordered less-than of the low
 * double; the upper lane is copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmplt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmplt_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmplt_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lane so the full-width compare cannot fault on the
 * upper lane. */
return simde_mm_move_sd(a, simde_mm_cmplt_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_sd(a, b) simde_mm_cmplt_sd(a, b)
#endif
/* simde_mm_cmple_pd: portable _mm_cmple_pd — per-lane ordered double
 * less-than-or-equal; all-ones where a <= b (NaN lanes yield zero). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmple_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmple_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcleq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmple(a_.altivec_f64, b_.altivec_f64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_pd(a, b) simde_mm_cmple_pd(a, b)
#endif
/* simde_mm_cmple_sd: portable _mm_cmple_sd — ordered <= of the low double;
 * the upper lane is copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmple_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmple_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmple_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lane so the full-width compare cannot fault on the
 * upper lane. */
return simde_mm_move_sd(a, simde_mm_cmple_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_sd(a, b) simde_mm_cmple_sd(a, b)
#endif
/* simde_mm_cmpgt_epi8: portable _mm_cmpgt_epi8 — signed 8-bit lane-wise
 * greater-than; all-ones where a > b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpgt_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpgt_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vcgtq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed char), vec_cmpgt(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), (a_.i8 > b_.i8));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi8(a, b) simde_mm_cmpgt_epi8(a, b)
#endif
/* simde_mm_cmpgt_epi16: portable _mm_cmpgt_epi16 — signed 16-bit lane-wise
 * greater-than; all-ones where a > b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpgt_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpgt_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vcgtq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed short), vec_cmpgt(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), (a_.i16 > b_.i16));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi16(a, b) simde_mm_cmpgt_epi16(a, b)
#endif
/* simde_mm_cmpgt_epi32: portable _mm_cmpgt_epi32 — signed 32-bit lane-wise
 * greater-than; all-ones where a > b, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmpgt_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpgt_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), vec_cmpgt(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), (a_.i32 > b_.i32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = (a_.i32[i] > b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi32(a, b) simde_mm_cmpgt_epi32(a, b)
#endif
/* simde_mm_cmpgt_pd: portable _mm_cmpgt_pd — per-lane ordered double
 * greater-than; all-ones where a > b (NaN lanes yield zero). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpgt_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpgt_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcgtq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpgt(a_.altivec_f64, b_.altivec_f64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_pd(a, b) simde_mm_cmpgt_pd(a, b)
#endif
/* simde_mm_cmpgt_sd: portable _mm_cmpgt_sd — ordered > of the low double;
 * the upper lane is copied unchanged from a. The native path is skipped
 * under __PGI (the PGI compiler is excluded for this intrinsic). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpgt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_cmpgt_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpgt_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_sd(a, simde_mm_cmpgt_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_sd(a, b) simde_mm_cmpgt_sd(a, b)
#endif
/* simde_mm_cmpge_pd: portable _mm_cmpge_pd — per-lane ordered double
 * greater-than-or-equal; all-ones where a >= b (NaN lanes yield zero). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpge_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpge_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_u64 = vcgeq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double), vec_cmpge(a_.altivec_f64, b_.altivec_f64));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0) : UINT64_C(0);
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_pd(a, b) simde_mm_cmpge_pd(a, b)
#endif
/* simde_mm_cmpge_sd: portable _mm_cmpge_sd — ordered >= of the low double;
 * the upper lane is copied unchanged from a. Native path skipped on PGI. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpge_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_cmpge_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpge_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_sd(a, simde_mm_cmpge_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_sd(a, b) simde_mm_cmpge_sd(a, b)
#endif
/* simde_mm_cmpngt_pd: portable _mm_cmpngt_pd ("not greater-than").
 * NOTE(review): the fallback delegates to cmple. For non-NaN inputs
 * !(a > b) == (a <= b), but when either lane is NaN the x86 NGT predicate
 * yields all-ones while LE yields zero — presumably acceptable under this
 * library's fast-NaN assumptions; confirm against project policy. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpngt_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpngt_pd(a, b);
#else
return simde_mm_cmple_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_pd(a, b) simde_mm_cmpngt_pd(a, b)
#endif
/* simde_mm_cmpngt_sd: portable _mm_cmpngt_sd (low lane "not greater-than";
 * upper lane copied from a). Native path skipped on PGI.
 * NOTE(review): fallback delegates to cmple_sd — differs from the x86 NGT
 * predicate when an operand is NaN (see simde_mm_cmpngt_pd). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpngt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_cmpngt_sd(a, b);
#else
return simde_mm_cmple_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_sd(a, b) simde_mm_cmpngt_sd(a, b)
#endif
/* simde_mm_cmpnge_pd: portable _mm_cmpnge_pd ("not greater-or-equal").
 * NOTE(review): fallback delegates to cmplt — equivalent for non-NaN
 * inputs; differs when a lane is NaN (NGE is true, LT is false). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnge_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpnge_pd(a, b);
#else
return simde_mm_cmplt_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_pd(a, b) simde_mm_cmpnge_pd(a, b)
#endif
/* simde_mm_cmpnge_sd: portable _mm_cmpnge_sd (low lane "not greater-or-
 * equal"; upper lane copied from a). Native path skipped on PGI.
 * NOTE(review): fallback delegates to cmplt_sd — differs for NaN inputs
 * (see simde_mm_cmpnge_pd). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnge_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_cmpnge_sd(a, b);
#else
return simde_mm_cmplt_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_sd(a, b) simde_mm_cmpnge_sd(a, b)
#endif
/* simde_mm_cmpnlt_pd: portable _mm_cmpnlt_pd ("not less-than").
 * NOTE(review): fallback delegates to cmpge — equivalent for non-NaN
 * inputs; differs when a lane is NaN (NLT is true, GE is false). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnlt_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpnlt_pd(a, b);
#else
return simde_mm_cmpge_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_pd(a, b) simde_mm_cmpnlt_pd(a, b)
#endif
/* simde_mm_cmpnlt_sd: portable _mm_cmpnlt_sd (low lane "not less-than";
 * upper lane copied from a). NOTE(review): fallback delegates to cmpge_sd —
 * differs for NaN inputs (see simde_mm_cmpnlt_pd). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnlt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpnlt_sd(a, b);
#else
return simde_mm_cmpge_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_sd(a, b) simde_mm_cmpnlt_sd(a, b)
#endif
/* simde_mm_cmpnle_pd: portable _mm_cmpnle_pd ("not less-or-equal").
 * NOTE(review): fallback delegates to cmpgt — equivalent for non-NaN
 * inputs; differs when a lane is NaN (NLE is true, GT is false). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnle_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpnle_pd(a, b);
#else
return simde_mm_cmpgt_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_pd(a, b) simde_mm_cmpnle_pd(a, b)
#endif
/* simde_mm_cmpnle_sd: portable _mm_cmpnle_sd (low lane "not less-or-equal";
 * upper lane copied from a). NOTE(review): fallback delegates to cmpgt_sd —
 * differs for NaN inputs (see simde_mm_cmpnle_pd). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpnle_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpnle_sd(a, b);
#else
return simde_mm_cmpgt_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_sd(a, b) simde_mm_cmpnle_sd(a, b)
#endif
/* simde_mm_cmpord_pd: portable _mm_cmpord_pd — per-lane "ordered" test:
 * all-ones where NEITHER a nor b is NaN, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpord_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpord_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Note: NEON does not have ordered compare builtin
Need to compare a eq a and b eq b to check for NaN
Do AND of results to get final */
uint64x2_t ceqaa = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t ceqbb = vceqq_f64(b_.neon_f64, b_.neon_f64);
r_.neon_u64 = vandq_u64(ceqaa, ceqbb);
#elif defined(simde_math_isnan)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (!simde_math_isnan(a_.f64[i]) && !simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_pd(a, b) simde_mm_cmpord_pd(a, b)
#endif
/* simde_mm_cvtsd_f64: portable _mm_cvtsd_f64 — extract the low double from
 * a. Native path skipped on PGI. */
SIMDE_FUNCTION_ATTRIBUTES
simde_float64
simde_mm_cvtsd_f64 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_cvtsd_f64(a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return HEDLEY_STATIC_CAST(simde_float64, vgetq_lane_f64(a_.neon_f64, 0));
#else
return a_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_f64(a) simde_mm_cvtsd_f64(a)
#endif
/* simde_mm_cmpord_sd: portable _mm_cmpord_sd — "ordered" test on the low
 * double (all-ones iff neither low lane is NaN); upper lane copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpord_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpord_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpord_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_sd(a, simde_mm_cmpord_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)))
;
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(simde_math_isnan)
r_.u64[0] = (!simde_math_isnan(a_.f64[0]) && !simde_math_isnan(b_.f64[0])) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_sd(a, b) simde_mm_cmpord_sd(a, b)
#endif
/* simde_mm_cmpunord_pd: portable _mm_cmpunord_pd — per-lane "unordered"
 * test: all-ones where EITHER a or b is NaN, zero otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpunord_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpunord_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* (a == a) AND (b == b) is the ordered mask; invert it for unordered. */
uint64x2_t ceqaa = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t ceqbb = vceqq_f64(b_.neon_f64, b_.neon_f64);
r_.neon_u64 = vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vandq_u64(ceqaa, ceqbb))));
#elif defined(simde_math_isnan)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.u64[i] = (simde_math_isnan(a_.f64[i]) || simde_math_isnan(b_.f64[i])) ? ~UINT64_C(0) : UINT64_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_pd(a, b) simde_mm_cmpunord_pd(a, b)
#endif
/* simde_mm_cmpunord_sd: portable _mm_cmpunord_sd — "unordered" test on the
 * low double (all-ones iff either low lane is NaN); upper lane from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cmpunord_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cmpunord_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_cmpunord_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_sd(a, simde_mm_cmpunord_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(simde_math_isnan)
r_.u64[0] = (simde_math_isnan(a_.f64[0]) || simde_math_isnan(b_.f64[0])) ? ~UINT64_C(0) : UINT64_C(0);
r_.u64[1] = a_.u64[1];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_sd(a, b) simde_mm_cmpunord_sd(a, b)
#endif
/* simde_mm_cvtepi32_pd: portable _mm_cvtepi32_pd — convert the two low
 * int32 lanes of a to doubles (exact; every int32 is representable). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtepi32_pd (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtepi32_pd(a);
#else
simde__m128d_private r_;
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
/* Only the low 64-bit half (two i32 lanes) participates. */
SIMDE_CONVERT_VECTOR_(r_.f64, a_.m64_private[0].i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = (simde_float64) a_.i32[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtepi32_pd(a) simde_mm_cvtepi32_pd(a)
#endif
/* simde_mm_cvtepi32_ps: portable _mm_cvtepi32_ps — convert four int32
 * lanes to float32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtepi32_ps (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtepi32_ps(a);
#else
simde__m128_private r_;
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_convert_i32x4(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#pragma clang diagnostic ignored "-Wc11-extensions"
#endif
/* vec_ctf second argument is a power-of-two scale; 0 = no scaling. */
r_.altivec_f32 = vec_ctf(a_.altivec_i32, 0);
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtepi32_ps(a) simde_mm_cvtepi32_ps(a)
#endif
/* simde_mm_cvtpd_pi32: portable _mm_cvtpd_pi32 — convert both doubles to
 * int32 with rounding; out-of-range values saturate to INT32_MIN (the x86
 * "integer indefinite") unless SIMDE_FAST_CONVERSION_RANGE skips the check. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtpd_pi32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpd_pi32(a);
#else
simde__m64_private r_;
simde__m128d_private a_ = simde__m128d_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
/* NOTE(review): simde_math_round rounds ties away from zero; SSE2's
 * default mode rounds ties to even — TODO confirm this is covered by
 * the library's fast-rounding assumptions. */
simde_float64 v = simde_math_round(a_.f64[i]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
}
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_pi32(a) simde_mm_cvtpd_pi32(a)
#endif
/* simde_mm_cvtpd_epi32: portable _mm_cvtpd_epi32 — convert both doubles to
 * int32 in the low 64 bits; the upper 64 bits are zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvtpd_epi32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_BUG_PGI_30107)
return _mm_cvtpd_epi32(a);
#else
simde__m128i_private r_;
r_.m64[0] = simde_mm_cvtpd_pi32(a);
r_.m64[1] = simde_mm_setzero_si64();
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_epi32(a) simde_mm_cvtpd_epi32(a)
#endif
/* simde_mm_cvtpd_ps: portable _mm_cvtpd_ps — narrow both doubles to floats
 * in the low two lanes; the upper two lanes are zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpd_ps (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtpd_ps(a);
#else
simde__m128_private r_;
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_f64(a_.neon_f64), vdup_n_f32(0.0f));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_float2(a_.altivec_f64, vec_splats(0.0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_demote_f64x2_zero(a_.wasm_v128);
#elif HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && HEDLEY_HAS_BUILTIN(__builtin_convertvector)
/* Convert the two f64 lanes to a 2-float vector, then widen to four
 * lanes by shuffling in a zero vector. */
float __attribute__((__vector_size__(8))) z = { 0.0f, 0.0f };
r_.f32 =
__builtin_shufflevector(
__builtin_convertvector(__builtin_shufflevector(a_.f64, a_.f64, 0, 1), __typeof__(z)), z,
0, 1, 2, 3
);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.f64[1]);
r_.f32[2] = SIMDE_FLOAT32_C(0.0);
r_.f32[3] = SIMDE_FLOAT32_C(0.0);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_ps(a) simde_mm_cvtpd_ps(a)
#endif
/* simde_mm_cvtpi32_pd: portable _mm_cvtpi32_pd — convert two int32 lanes of
 * an __m64 to doubles (exact conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtpi32_pd (simde__m64 a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_pd(a);
#else
simde__m128d_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f64, a_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = (simde_float64) a_.i32[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi32_pd(a) simde_mm_cvtpi32_pd(a)
#endif
/* simde_mm_cvtps_epi32: portable _mm_cvtps_epi32 — convert four floats to
 * int32 with round-to-nearest; out-of-range values saturate to INT32_MIN
 * unless SIMDE_FAST_CONVERSION_RANGE skips the range check.
 *
 * Fix: the AltiVec branch passed a scale of 1 to vec_cts, which multiplies
 * the input by 2^1 before converting (vec_cts converts to a fixed-point
 * value scaled by the second argument). The unscaled conversion requires
 * scale 0 — consistent with the inverse conversion vec_ctf(a, 0) used in
 * simde_mm_cvtepi32_ps. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvtps_epi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtps_epi32(a);
#else
simde__m128i_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) && !defined(SIMDE_BUG_GCC_95399)
a_ = simde__m128_to_private(a);
r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES)
a_ = simde__m128_to_private(a);
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
r_.altivec_i32 = vec_cts(a_.altivec_f32, 0);
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES)
a_ = simde__m128_to_private(a);
r_.wasm_v128 = wasm_i32x4_trunc_sat_f32x4(a_.wasm_v128);
#else
/* Round first (ties handling per simde_x_mm_round_ps), then truncate. */
a_ = simde__m128_to_private(simde_x_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEAREST_INT, 1));
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_epi32(a) simde_mm_cvtps_epi32(a)
#endif
/* simde_mm_cvtps_pd: portable _mm_cvtps_pd — widen the two low floats of a
 * to doubles (exact conversion). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtps_pd (simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtps_pd(a);
#else
simde__m128d_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
/* Only the low 64-bit half (two f32 lanes) participates. */
SIMDE_CONVERT_VECTOR_(r_.f64, a_.m64_private[0].f32);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vcvt_f64_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f32[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_pd(a) simde_mm_cvtps_pd(a)
#endif
/* simde_mm_cvtsd_si32: portable _mm_cvtsd_si32 — convert the low double to
 * int32 with rounding; out-of-range saturates to INT32_MIN unless
 * SIMDE_FAST_CONVERSION_RANGE skips the check. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtsd_si32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtsd_si32(a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
/* NOTE(review): simde_math_round rounds ties away from zero, not to even
 * as SSE2's default mode does — TODO confirm fast-rounding policy. */
simde_float64 v = simde_math_round(a_.f64[0]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, v);
#else
return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_si32(a) simde_mm_cvtsd_si32(a)
#endif
/* simde_mm_cvtsd_si64: portable _mm_cvtsd_si64 — convert the low double to
 * int64 with rounding. PGI spells the native intrinsic _mm_cvtsd_si64x.
 * The _si64x alias is also provided for non-AMD64 targets. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtsd_si64 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if defined(__PGI)
return _mm_cvtsd_si64x(a);
#else
return _mm_cvtsd_si64(a);
#endif
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
return SIMDE_CONVERT_FTOI(int64_t, simde_math_round(a_.f64[0]));
#endif
}
#define simde_mm_cvtsd_si64x(a) simde_mm_cvtsd_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_cvtsd_si64(a) simde_mm_cvtsd_si64(a)
#define _mm_cvtsd_si64x(a) simde_mm_cvtsd_si64x(a)
#endif
/* Portable _mm_cvtsd_ss: narrow the low double lane of b to single
 * precision into lane 0 of the result; upper three lanes come from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsd_ss (simde__m128 a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtsd_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m128d_private b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vcvtxd_f32_f64(vgetq_lane_f64(b_.neon_f64, 0)), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b_.f64[0]);
/* Copy lanes 1..3 of a through unchanged (as raw 32-bit values). */
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_ss(a, b) simde_mm_cvtsd_ss(a, b)
#endif
/* SIMDe extension (x_ prefix: no x86 equivalent): extract the lowest
 * 16-bit lane of a as an int16_t. */
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_x_mm_cvtsi128_si16 (simde__m128i a) {
simde__m128i_private
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_s16(a_.neon_i16, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return HEDLEY_STATIC_CAST(int16_t, wasm_i16x8_extract_lane(a_.wasm_v128, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
/* Work around GCC bug 95227 (spurious unused warning for a_). */
(void) a_;
#endif
return vec_extract(a_.altivec_i16, 0);
#else
return a_.i16[0];
#endif
}
/* Portable _mm_cvtsi128_si32: extract the lowest 32-bit lane of a. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtsi128_si32 (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtsi128_si32(a);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_s32(a_.neon_i32, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return HEDLEY_STATIC_CAST(int32_t, wasm_i32x4_extract_lane(a_.wasm_v128, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
/* Work around GCC bug 95227 (spurious unused warning for a_). */
(void) a_;
#endif
return vec_extract(a_.altivec_i32, 0);
#else
return a_.i32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi128_si32(a) simde_mm_cvtsi128_si32(a)
#endif
/* Portable _mm_cvtsi128_si64: extract the lowest 64-bit lane of a.
 * Native path requires AMD64 (the intrinsic is 64-bit only). */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtsi128_si64 (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if defined(__PGI)
/* PGI only provides the _si64x spelling. */
return _mm_cvtsi128_si64x(a);
#else
return _mm_cvtsi128_si64(a);
#endif
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && !defined(HEDLEY_IBM_VERSION)
return vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed long long), a_.i64), 0);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_s64(a_.neon_i64, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return HEDLEY_STATIC_CAST(int64_t, wasm_i64x2_extract_lane(a_.wasm_v128, 0));
#endif
/* Scalar fallback; unreachable when one of the branches above returned. */
return a_.i64[0];
#endif
}
#define simde_mm_cvtsi128_si64x(a) simde_mm_cvtsi128_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_cvtsi128_si64(a) simde_mm_cvtsi128_si64(a)
#define _mm_cvtsi128_si64x(a) simde_mm_cvtsi128_si64x(a)
#endif
/* Portable _mm_cvtsi32_sd: convert b to double in the low lane of the
 * result; the upper lane is copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtsi32_sd (simde__m128d a, int32_t b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtsi32_sd(a, b);
#else
simde__m128d_private r_;
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsetq_lane_f64(HEDLEY_STATIC_CAST(float64_t, b), a_.neon_f64, 0);
#else
r_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b);
/* Copy the upper lane as raw bits (no FP interpretation needed). */
r_.i64[1] = a_.i64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi32_sd(a, b) simde_mm_cvtsi32_sd(a, b)
#endif
/* SIMDe extension (x_ prefix: no x86 equivalent): place a in the lowest
 * 16-bit lane of a 128-bit vector and zero the remaining seven lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_cvtsi16_si128 (int16_t a) {
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vsetq_lane_s16(a, vdupq_n_s16(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_make(a, 0, 0, 0, 0, 0, 0, 0);
#else
/* Scalar fallback: lane 0 gets the value, lanes 1..7 are cleared. */
r_.i16[0] = a;
for (size_t lane = 1 ; lane < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; lane++) {
r_.i16[lane] = 0;
}
#endif
return simde__m128i_from_private(r_);
}
/* Portable _mm_cvtsi32_si128: place a in the lowest 32-bit lane of a
 * 128-bit vector and zero the remaining three lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvtsi32_si128 (int32_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtsi32_si128(a);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vsetq_lane_s32(a, vdupq_n_s32(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_make(a, 0, 0, 0);
#else
/* Scalar fallback: lane 0 gets the value, lanes 1..3 are cleared. */
r_.i32[0] = a;
for (size_t lane = 1 ; lane < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; lane++) {
r_.i32[lane] = 0;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi32_si128(a) simde_mm_cvtsi32_si128(a)
#endif
/* Portable _mm_cvtsi64_sd: convert the 64-bit integer b to double in the
 * low lane; upper lane is copied from a.  Native path is AMD64-only. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtsi64_sd (simde__m128d a, int64_t b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_sd(a, b);
#else
/* PGI only provides the _si64x spelling. */
return _mm_cvtsi64x_sd(a, b);
#endif
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsetq_lane_f64(HEDLEY_STATIC_CAST(float64_t, b), a_.neon_f64, 0);
#else
r_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b);
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#define simde_mm_cvtsi64x_sd(a, b) simde_mm_cvtsi64_sd(a, b)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_cvtsi64_sd(a, b) simde_mm_cvtsi64_sd(a, b)
#define _mm_cvtsi64x_sd(a, b) simde_mm_cvtsi64x_sd(a, b)
#endif
/* Portable _mm_cvtsi64_si128: place a in the low 64-bit lane of a 128-bit
 * vector and zero the upper lane.  Native path is AMD64-only. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvtsi64_si128 (int64_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_si128(a);
#else
/* PGI only provides the _si64x spelling. */
return _mm_cvtsi64x_si128(a);
#endif
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vsetq_lane_s64(a, vdupq_n_s64(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_make(a, 0);
#else
r_.i64[0] = a;
r_.i64[1] = 0;
#endif
return simde__m128i_from_private(r_);
#endif
}
#define simde_mm_cvtsi64x_si128(a) simde_mm_cvtsi64_si128(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_cvtsi64_si128(a) simde_mm_cvtsi64_si128(a)
#define _mm_cvtsi64x_si128(a) simde_mm_cvtsi64x_si128(a)
#endif
/* Portable _mm_cvtss_sd: widen the low single-precision lane of b to
 * double in the low lane of the result; upper lane is copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cvtss_sd (simde__m128d a, simde__m128 b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvtss_sd(a, b);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Widen b[0] into a f64x2, then overwrite the upper lane with a[1]. */
float64x2_t temp = vcvt_f64_f32(vset_lane_f32(vgetq_lane_f32(simde__m128_to_private(b).neon_f32, 0), vdup_n_f32(0), 0));
return vsetq_lane_f64(vgetq_lane_f64(simde__m128d_to_private(a).neon_f64, 1), temp, 1);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a);
simde__m128_private b_ = simde__m128_to_private(b);
/* Reuse a_ as the result: only lane 0 changes. */
a_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b_.f32[0]);
return simde__m128d_from_private(a_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtss_sd(a, b) simde_mm_cvtss_sd(a, b)
#endif
/* Portable _mm_cvttpd_pi32: convert both double lanes of a to int32 with
 * truncation (round toward zero), returning a 64-bit MMX-style vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvttpd_pi32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvttpd_pi32(a);
#else
simde__m64_private r_;
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.f64);
#else
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float64 v = a_.f64[i];
#if defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
/* Out-of-range and NaN inputs yield INT32_MIN, mirroring the x86
 * "integer indefinite" result 0x80000000. */
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttpd_pi32(a) simde_mm_cvttpd_pi32(a)
#endif
/* Portable _mm_cvttpd_epi32: truncate both double lanes of a to int32 in
 * the low 64 bits of the result; the upper 64 bits are zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvttpd_epi32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvttpd_epi32(a);
#else
simde__m128i_private r_;
/* Delegate the conversion to the m64 helper and zero the upper half. */
r_.m64[0] = simde_mm_cvttpd_pi32(a);
r_.m64[1] = simde_mm_setzero_si64();
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttpd_epi32(a) simde_mm_cvttpd_epi32(a)
#endif
/* Portable _mm_cvttps_epi32: convert all four float lanes of a to int32
 * with truncation.  On x86, out-of-range and NaN lanes become INT32_MIN
 * (0x80000000); the non-native paths reproduce that unless the
 * SIMDE_FAST_CONVERSION_RANGE / SIMDE_FAST_NANS opt-outs are defined. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cvttps_epi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvttps_epi32(a);
#else
simde__m128i_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32);
#if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS)
/* Values below INT32_MIN saturate anyways, so we don't need to
* test for that. */
#if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_FAST_NANS)
/* Lane is valid iff it is below 2^31 AND not NaN (x == x). */
uint32x4_t valid_input =
vandq_u32(
vcltq_f32(a_.neon_f32, vdupq_n_f32(SIMDE_FLOAT32_C(2147483648.0))),
vceqq_f32(a_.neon_f32, a_.neon_f32)
);
#elif !defined(SIMDE_FAST_CONVERSION_RANGE)
uint32x4_t valid_input = vcltq_f32(a_.neon_f32, vdupq_n_f32(SIMDE_FLOAT32_C(2147483648.0)));
#elif !defined(SIMDE_FAST_NANS)
uint32x4_t valid_input = vceqq_f32(a_.neon_f32, a_.neon_f32);
#endif
/* Replace invalid lanes with INT32_MIN to match x86. */
r_.neon_i32 = vbslq_s32(valid_input, r_.neon_i32, vdupq_n_s32(INT32_MIN));
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_trunc_sat_f32x4(a_.wasm_v128);
#if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS)
#if !defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_FAST_NANS)
/* Lane is valid iff it is below 2^31 AND not NaN (x == x). */
v128_t valid_input =
wasm_v128_and(
wasm_f32x4_lt(a_.wasm_v128, wasm_f32x4_splat(SIMDE_FLOAT32_C(2147483648.0))),
wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128)
);
#elif !defined(SIMDE_FAST_CONVERSION_RANGE)
v128_t valid_input = wasm_f32x4_lt(a_.wasm_v128, wasm_f32x4_splat(SIMDE_FLOAT32_C(2147483648.0)));
#elif !defined(SIMDE_FAST_NANS)
v128_t valid_input = wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128);
#endif
r_.wasm_v128 = wasm_v128_bitselect(r_.wasm_v128, wasm_i32x4_splat(INT32_MIN), valid_input);
#endif
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32);
#if !defined(SIMDE_FAST_CONVERSION_RANGE) || !defined(SIMDE_FAST_NANS)
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
static const simde_float32 SIMDE_VECTOR(16) first_too_high = { SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0), SIMDE_FLOAT32_C(2147483648.0) };
/* NaN fails both comparisons, so this mask also rejects NaN lanes. */
__typeof__(r_.i32) valid_input =
HEDLEY_REINTERPRET_CAST(
__typeof__(r_.i32),
(a_.f32 < first_too_high) & (a_.f32 >= -first_too_high)
);
#elif !defined(SIMDE_FAST_NANS)
__typeof__(r_.i32) valid_input = HEDLEY_REINTERPRET_CAST( __typeof__(valid_input), a_.f32 == a_.f32);
#endif
/* Blend: keep converted lanes where valid, INT32_MIN elsewhere. */
__typeof__(r_.i32) invalid_output = { INT32_MIN, INT32_MIN, INT32_MIN, INT32_MIN };
r_.i32 = (r_.i32 & valid_input) | (invalid_output & ~valid_input);
#endif
#else
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_NANS)
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttps_epi32(a) simde_mm_cvttps_epi32(a)
#endif
/* Portable _mm_cvttsd_si32: convert the low double lane of a to int32
 * with truncation (no rounding step, unlike simde_mm_cvtsd_si32). */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvttsd_si32 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_cvttsd_si32(a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
simde_float64 v = a_.f64[0];
#if defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, v);
#else
/* Out-of-range and NaN inputs yield INT32_MIN, mirroring x86. */
return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttsd_si32(a) simde_mm_cvttsd_si32(a)
#endif
/* Portable _mm_cvttsd_si64: convert the low double lane of a to int64
 * with truncation.  Native path is AMD64-only. */
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttsd_si64 (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvttsd_si64(a);
#else
/* PGI only provides the _si64x spelling. */
return _mm_cvttsd_si64x(a);
#endif
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
return SIMDE_CONVERT_FTOI(int64_t, a_.f64[0]);
#endif
}
#define simde_mm_cvttsd_si64x(a) simde_mm_cvttsd_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_cvttsd_si64(a) simde_mm_cvttsd_si64(a)
#define _mm_cvttsd_si64x(a) simde_mm_cvttsd_si64x(a)
#endif
/* Portable _mm_div_pd: lane-wise double-precision division a / b. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_div_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_div_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = a_.f64 / b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vdivq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_div(a_.wasm_v128, b_.wasm_v128);
#else
/* Scalar fallback: divide each of the two lanes independently. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f64[i] / b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_div_pd(a, b) simde_mm_div_pd(a, b)
#endif
/* Portable _mm_div_sd: divide the low double lane of a by the low lane of
 * b; the upper lane of the result is the upper lane of a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_div_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_div_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_div_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lane first so the full-width divide cannot raise
 * spurious FP exceptions from the (discarded) upper lane. */
return simde_mm_move_sd(a, simde_mm_div_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
float64x2_t temp = vdivq_f64(a_.neon_f64, b_.neon_f64);
/* Fix: use the typed vgetq_lane_f64 intrinsic -- a bare "vgetq_lane" is
 * not declared by arm_neon.h, so this path previously failed to build. */
r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
r_.f64[0] = a_.f64[0] / b_.f64[0];
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_div_sd(a, b) simde_mm_div_sd(a, b)
#endif
/* Portable _mm_extract_epi16: extract 16-bit lane imm8 (0..7) of a,
 * zero-extended into the low 16 bits of an int32_t. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_extract_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
uint16_t r;
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
/* Work around GCC bug 95227 (spurious unused warnings). */
(void) a_;
(void) imm8;
#endif
r = HEDLEY_STATIC_CAST(uint16_t, vec_extract(a_.altivec_i16, imm8));
#else
r = a_.u16[imm8 & 7];
#endif
/* Going through uint16_t gives the zero-extension x86 specifies. */
return HEDLEY_STATIC_CAST(int32_t, r);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(4,6,0))
#define simde_mm_extract_epi16(a, imm8) _mm_extract_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_epi16(a, imm8) (HEDLEY_STATIC_CAST(int32_t, vgetq_lane_s16(simde__m128i_to_private(a).neon_i16, (imm8))) & (INT32_C(0x0000ffff)))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_extract_epi16(a, imm8) simde_mm_extract_epi16(a, imm8)
#endif
/* Portable _mm_insert_epi16: return a copy of a with 16-bit lane imm8
 * (0..7) replaced by i. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_insert_epi16 (simde__m128i a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
simde__m128i_private a_ = simde__m128i_to_private(a);
a_.i16[imm8 & 7] = i;
return simde__m128i_from_private(a_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_insert_epi16(a, i, imm8) _mm_insert_epi16((a), (i), (imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_epi16(a, i, imm8) simde__m128i_from_neon_i16(vsetq_lane_s16((i), simde__m128i_to_neon_i16(a), (imm8)))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_insert_epi16(a, i, imm8) simde_mm_insert_epi16(a, i, imm8)
#endif
/* Portable _mm_load_pd: load two doubles from 16-byte-aligned memory. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_load_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_load_pd(mem_addr);
#else
simde__m128d_private r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vld1q_f64(mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 has no f64 vectors; load the same 16 bytes as u32x4. */
r_.neon_u32 = vld1q_u32(HEDLEY_REINTERPRET_CAST(uint32_t const*, mem_addr));
#else
/* The alignment hint lets the compiler emit an aligned copy. */
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d), sizeof(r_));
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_pd(mem_addr) simde_mm_load_pd(mem_addr)
#endif
/* Portable _mm_load1_pd: load one double and broadcast it to both lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_load1_pd (simde_float64 const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_load1_pd(mem_addr);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return simde__m128d_from_neon_f64(vld1q_dup_f64(mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return simde__m128d_from_wasm_v128(wasm_v128_load64_splat(mem_addr));
#else
return simde_mm_set1_pd(*mem_addr);
#endif
}
/* _mm_load_pd1 is an alternate x86 spelling of the same operation. */
#define simde_mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr)
#define _mm_load1_pd(mem_addr) simde_mm_load1_pd(mem_addr)
#endif
/* Portable _mm_load_sd: load one double into the low lane and zero the
 * upper lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_load_sd (simde_float64 const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_load_sd(mem_addr);
#else
simde__m128d_private r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsetq_lane_f64(*mem_addr, vdupq_n_f64(0), 0);
#else
r_.f64[0] = *mem_addr;
/* Zero the upper lane via the integer view (exact all-zero bits). */
r_.u64[1] = UINT64_C(0);
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_sd(mem_addr) simde_mm_load_sd(mem_addr)
#endif
/* Portable _mm_load_si128: load 128 bits from 16-byte-aligned memory. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_load_si128 (simde__m128i const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_load_si128(HEDLEY_REINTERPRET_CAST(__m128i const*, mem_addr));
#else
simde__m128i_private r_;
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_ld(0, HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(int) const*, mem_addr));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vld1q_s32(HEDLEY_REINTERPRET_CAST(int32_t const*, mem_addr));
#else
/* The alignment hint lets the compiler emit an aligned copy. */
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i), sizeof(simde__m128i));
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_si128(mem_addr) simde_mm_load_si128(mem_addr)
#endif
/* Portable _mm_loadh_pd: keep the low lane of a and load the upper lane
 * from (possibly unaligned) memory. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadh_pd (simde__m128d a, simde_float64 const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadh_pd(a, mem_addr);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vcombine_f64(vget_low_f64(a_.neon_f64), vld1_f64(HEDLEY_REINTERPRET_CAST(const float64_t*, mem_addr)));
#else
/* memcpy avoids alignment/strict-aliasing issues on the scalar read. */
simde_float64 t;
simde_memcpy(&t, mem_addr, sizeof(t));
r_.f64[0] = a_.f64[0];
r_.f64[1] = t;
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadh_pd(a, mem_addr) simde_mm_loadh_pd(a, mem_addr)
#endif
/* Portable _mm_loadl_epi64: load 64 bits from (possibly unaligned) memory
 * into the low half of the result and zero the upper half. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadl_epi64 (simde__m128i const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadl_epi64(mem_addr);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vcombine_s64(vld1_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr)), vdup_n_s64(0));
#else
/* memcpy avoids strict-aliasing/alignment issues on the 64-bit read.
 * Done only on this path: the NEON branch loads directly, so the
 * previous unconditional load left `value` dead there (and triggered
 * -Wunused-but-set-variable). */
int64_t value;
simde_memcpy(&value, mem_addr, sizeof(value));
r_.i64[0] = value;
r_.i64[1] = 0;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadl_epi64(mem_addr) simde_mm_loadl_epi64(mem_addr)
#endif
/* Portable _mm_loadl_pd: load the low lane from memory and keep the
 * upper lane of a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadl_pd (simde__m128d a, simde_float64 const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadl_pd(a, mem_addr);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vcombine_f64(vld1_f64(
HEDLEY_REINTERPRET_CAST(const float64_t*, mem_addr)), vget_high_f64(a_.neon_f64));
#else
r_.f64[0] = *mem_addr;
/* Copy the upper lane as raw bits (no FP interpretation needed). */
r_.u64[1] = a_.u64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadl_pd(a, mem_addr) simde_mm_loadl_pd(a, mem_addr)
#endif
/* Portable _mm_loadr_pd: load two doubles from 16-byte-aligned memory in
 * reversed lane order (mem[1] -> lane 0, mem[0] -> lane 1). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadr_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadr_pd(mem_addr);
#else
simde__m128d_private
r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Load then rotate by one 64-bit element to swap the lanes. */
r_.neon_f64 = vld1q_f64(mem_addr);
r_.neon_f64 = vextq_f64(r_.neon_f64, r_.neon_f64, 1);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vld1q_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr));
r_.neon_i64 = vextq_s64(r_.neon_i64, r_.neon_i64, 1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t tmp = wasm_v128_load(mem_addr);
r_.wasm_v128 = wasm_i64x2_shuffle(tmp, tmp, 1, 0);
#else
r_.f64[0] = mem_addr[1];
r_.f64[1] = mem_addr[0];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadr_pd(mem_addr) simde_mm_loadr_pd(mem_addr)
#endif
/* Portable _mm_loadu_pd: load two doubles from unaligned memory. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadu_pd (simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_pd(mem_addr);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vld1q_f64(mem_addr);
#else
/* memcpy is the portable unaligned load. */
simde__m128d_private r_;
simde_memcpy(&r_, mem_addr, sizeof(r_));
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_pd(mem_addr) simde_mm_loadu_pd(mem_addr)
#endif
/* Portable _mm_loadu_epi8 (AVX-512VL/BW): unaligned 128-bit load,
 * nominally of 8-bit elements; identical bits to _mm_loadu_si128. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_epi8(void const * mem_addr) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_CLANG_REV_344862)
return _mm_loadu_epi8(mem_addr);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const*, mem_addr));
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128i_from_private(r_);
#endif
}
#define simde_x_mm_loadu_epi8(mem_addr) simde_mm_loadu_epi8(mem_addr)
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && (defined(SIMDE_BUG_GCC_95483) || defined(SIMDE_BUG_CLANG_REV_344862)))
#undef _mm_loadu_epi8
#define _mm_loadu_epi8(a) simde_mm_loadu_epi8(a)
#endif
/* Portable _mm_loadu_epi16 (AVX-512VL/BW): unaligned 128-bit load,
 * nominally of 16-bit elements; identical bits to _mm_loadu_si128. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_epi16(void const * mem_addr) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && defined(SIMDE_X86_AVX512BW_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_CLANG_REV_344862)
return _mm_loadu_epi16(mem_addr);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Load as s8 (no alignment requirement), then reinterpret as s16. */
r_.neon_i16 = vreinterpretq_s16_s8(vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const*, mem_addr)));
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128i_from_private(r_);
#endif
}
#define simde_x_mm_loadu_epi16(mem_addr) simde_mm_loadu_epi16(mem_addr)
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && (defined(SIMDE_BUG_GCC_95483) || defined(SIMDE_BUG_CLANG_REV_344862)))
#undef _mm_loadu_epi16
#define _mm_loadu_epi16(a) simde_mm_loadu_epi16(a)
#endif
/* Portable _mm_loadu_epi32 (AVX-512VL): unaligned 128-bit load,
 * nominally of 32-bit elements; identical bits to _mm_loadu_si128. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_epi32(void const * mem_addr) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_CLANG_REV_344862)
return _mm_loadu_epi32(mem_addr);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Load as s8 (no alignment requirement), then reinterpret as s32. */
r_.neon_i32 = vreinterpretq_s32_s8(vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const*, mem_addr)));
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128i_from_private(r_);
#endif
}
#define simde_x_mm_loadu_epi32(mem_addr) simde_mm_loadu_epi32(mem_addr)
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && (defined(SIMDE_BUG_GCC_95483) || defined(SIMDE_BUG_CLANG_REV_344862)))
#undef _mm_loadu_epi32
#define _mm_loadu_epi32(a) simde_mm_loadu_epi32(a)
#endif
/* Portable _mm_loadu_epi64 (AVX-512VL): unaligned 128-bit load,
 * nominally of 64-bit elements; identical bits to _mm_loadu_si128. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_epi64(void const * mem_addr) {
#if defined(SIMDE_X86_AVX512VL_NATIVE) && !defined(SIMDE_BUG_GCC_95483) && !defined(SIMDE_BUG_CLANG_REV_344862)
return _mm_loadu_epi64(mem_addr);
#elif defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_si128(SIMDE_ALIGN_CAST(__m128i const *, mem_addr));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Load as s8 (no alignment requirement), then reinterpret as s64. */
r_.neon_i64 = vreinterpretq_s64_s8(vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const*, mem_addr)));
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128i_from_private(r_);
#endif
}
#define simde_x_mm_loadu_epi64(mem_addr) simde_mm_loadu_epi64(mem_addr)
#if defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && (defined(SIMDE_BUG_GCC_95483) || defined(SIMDE_BUG_CLANG_REV_344862)))
#undef _mm_loadu_epi64
#define _mm_loadu_epi64(a) simde_mm_loadu_epi64(a)
#endif
/* Portable _mm_loadu_si128: load 128 bits from unaligned memory. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_si128 (void const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_loadu_si128(HEDLEY_STATIC_CAST(__m128i const*, mem_addr));
#else
simde__m128i_private r_;
#if HEDLEY_GNUC_HAS_ATTRIBUTE(may_alias,3,3,0)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_PACKED_
/* packed+may_alias lets GCC-compatible compilers do the unaligned,
 * aliasing-safe load directly instead of going through memcpy. */
struct simde_mm_loadu_si128_s {
__typeof__(r_) v;
} __attribute__((__packed__, __may_alias__));
r_ = HEDLEY_REINTERPRET_CAST(const struct simde_mm_loadu_si128_s *, mem_addr)->v;
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const*, mem_addr));
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si128(mem_addr) simde_mm_loadu_si128(mem_addr)
#endif
/* Portable _mm_madd_epi16: multiply corresponding signed 16-bit lanes of
 * a and b to 32-bit products, then add each adjacent pair of products,
 * yielding four 32-bit results. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_madd_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_madd_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Widening multiplies of the two halves, then pairwise add. */
int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
int32x4_t ph = vmull_high_s16(a_.neon_i16, b_.neon_i16);
r_.neon_i32 = vpaddq_s32(pl, ph);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Same idea, but ARMv7 lacks vmull_high/vpaddq so halves are explicit. */
int32x4_t pl = vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
int32x4_t ph = vmull_s16(vget_high_s16(a_.neon_i16), vget_high_s16(b_.neon_i16));
int32x2_t rl = vpadd_s32(vget_low_s32(pl), vget_high_s32(pl));
int32x2_t rh = vpadd_s32(vget_low_s32(ph), vget_high_s32(ph));
r_.neon_i32 = vcombine_s32(rl, rh);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, vec_splats(0));
#elif defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
/* even*even + odd*odd products == adjacent-pair sums. */
r_.altivec_i32 = vec_mule(a_.altivec_i16, b_.altivec_i16) + vec_mulo(a_.altivec_i16, b_.altivec_i16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
/* Widen both operands to 32 bits, multiply, then add even and odd
 * product lanes via shuffles. */
int32_t SIMDE_VECTOR(32) a32, b32, p32;
SIMDE_CONVERT_VECTOR_(a32, a_.i16);
SIMDE_CONVERT_VECTOR_(b32, b_.i16);
p32 = a32 * b32;
r_.i32 =
__builtin_shufflevector(p32, p32, 0, 2, 4, 6) +
__builtin_shufflevector(p32, p32, 1, 3, 5, 7);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i += 2) {
r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) + (a_.i16[i + 1] * b_.i16[i + 1]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_madd_epi16(a, b) simde_mm_madd_epi16(a, b)
#endif
/* Portable _mm_maskmoveu_si128: conditionally store each byte of a to
 * mem_addr; a byte is written only when the top bit of the corresponding
 * mask byte is set.  Bytes with a clear mask bit are left untouched. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmoveu_si128 (simde__m128i a, simde__m128i mask, int8_t mem_addr[HEDLEY_ARRAY_PARAM(16)]) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_maskmoveu_si128(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
mask_ = simde__m128i_to_private(mask);
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
if (mask_.u8[i] & 0x80) {
mem_addr[i] = a_.i8[i];
}
}
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_maskmoveu_si128(a, mask, mem_addr) simde_mm_maskmoveu_si128((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
/* Portable _mm_movemask_epi8: gather the sign bit of each of the 16 bytes
 * of a into the low 16 bits of an int32_t (byte 0 -> bit 0). */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_movemask_epi8 (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__INTEL_COMPILER)
/* ICC has trouble with _mm_movemask_epi8 at -O2 and above: */
return _mm_movemask_epi8(a);
#else
int32_t r = 0;
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* https://github.com/WebAssembly/simd/pull/201#issue-380682845 */
/* Per-byte weights: bit position of each byte within its 8-byte half. */
static const uint8_t md[16] = {
1 << 0, 1 << 1, 1 << 2, 1 << 3,
1 << 4, 1 << 5, 1 << 6, 1 << 7,
1 << 0, 1 << 1, 1 << 2, 1 << 3,
1 << 4, 1 << 5, 1 << 6, 1 << 7,
};
/* Extend sign bit over entire lane */
uint8x16_t extended = vreinterpretq_u8_s8(vshrq_n_s8(a_.neon_i8, 7));
/* Clear all but the bit we're interested in. */
uint8x16_t masked = vandq_u8(vld1q_u8(md), extended);
/* Alternate bytes from low half and high half */
uint8x8x2_t tmp = vzip_u8(vget_low_u8(masked), vget_high_u8(masked));
uint16x8_t x = vreinterpretq_u16_u8(vcombine_u8(tmp.val[0], tmp.val[1]));
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r = vaddvq_u16(x);
#else
/* No horizontal add on ARMv7: widen pairwise twice, then sum lanes. */
uint64x2_t t64 = vpaddlq_u32(vpaddlq_u16(x));
r =
HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(t64, 0)) +
HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(t64, 1));
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
/* vbpermq gathers the selected bit of each byte; the permute vector
 * picks each byte's sign bit (bit offsets 120,112,...,0). */
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 };
r = HEDLEY_STATIC_CAST(int32_t, vec_extract(vec_vbpermq(a_.altivec_u8, perm), 1));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG)
static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) perm = { 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0 };
r = HEDLEY_STATIC_CAST(int32_t, vec_extract(vec_vbpermq(a_.altivec_u8, perm), 14));
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
r |= (a_.u8[15 - i] >> 7) << (15 - i);
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_epi8(a) simde_mm_movemask_epi8(a)
#endif
/* Portable _mm_movemask_pd: gather the sign bit of each double lane of a
 * into the low 2 bits of an int32_t (lane 0 -> bit 0). */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_movemask_pd (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_movemask_pd(a);
#else
int32_t r = 0;
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
/* Shift each 64-bit lane's sign bit down to bit 0, then combine. */
uint64x2_t shifted = vshrq_n_u64(a_.neon_u64, 63);
r =
HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(shifted, 0)) +
(HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u64(shifted, 1)) << 1);
HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && defined(SIMDE_BUG_CLANG_50932)
/* vec_bperm selects bit offsets 64 and 0 (the two lane sign bits);
 * the u128 overload works around clang bug 50932. */
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), vec_bperm(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned __int128), a_.altivec_u64), idx));
r = HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) idx = { 64, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) res = vec_bperm(a_.altivec_u8, idx);
r = HEDLEY_STATIC_CAST(int32_t, vec_extract(HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(signed int), res), 2));
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < (sizeof(a_.u64) / sizeof(a_.u64[0])) ; i++) {
r |= (a_.u64[i] >> 63) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_pd(a) simde_mm_movemask_pd(a)
#endif
/* _mm_movepi64_pi64: return the low 64-bit integer lane of `a` as an
 * MMX-style 64-bit vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_movepi64_pi64 (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movepi64_pi64(a);
#else
simde__m128i_private src_ = simde__m128i_to_private(a);
simde__m64_private res_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* The low half of a 128-bit NEON register is exactly the result. */
res_.neon_i64 = vget_low_s64(src_.neon_i64);
#else
res_.i64[0] = src_.i64[0];
#endif
return simde__m64_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movepi64_pi64(a) simde_mm_movepi64_pi64(a)
#endif
/* _mm_movpi64_epi64: widen the 64-bit vector `a` to 128 bits, zeroing the
 * upper lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_movpi64_epi64 (simde__m64 a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movpi64_epi64(a);
#else
simde__m64_private src_ = simde__m64_to_private(a);
simde__m128i_private res_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Combine the source with a zeroed half to form the 128-bit value. */
res_.neon_i64 = vcombine_s64(src_.neon_i64, vdup_n_s64(0));
#else
res_.i64[1] = 0;
res_.i64[0] = src_.i64[0];
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movpi64_epi64(a) simde_mm_movpi64_epi64(a)
#endif
/* _mm_min_epi16: lane-wise minimum of signed 16-bit integers. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_min_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_min_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vminq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_min(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = vec_min(a_.altivec_i16, b_.altivec_i16);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_epi16(a, b) simde_mm_min_epi16(a, b)
#endif
/* _mm_min_epu8: lane-wise minimum of unsigned 8-bit integers. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_min_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_min_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vminq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_min(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u8 = vec_min(a_.altivec_u8, b_.altivec_u8);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_epu8(a, b) simde_mm_min_epu8(a, b)
#endif
/* _mm_min_pd: lane-wise minimum of double-precision values.  The scalar
 * fallback `(a < b) ? a : b` matches x86 semantics: when either input is
 * NaN the second operand (b) is returned. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_min_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_min_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = vec_min(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vminq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_min(a_.wasm_v128, b_.wasm_v128);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = (a_.f64[i] < b_.f64[i]) ? a_.f64[i] : b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_pd(a, b) simde_mm_min_pd(a, b)
#endif
/* _mm_min_sd: lane 0 of the result is min(a[0], b[0]); lane 1 is copied
 * unchanged from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_min_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_min_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_min_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the (discarded) upper lane cannot raise
 * spurious floating-point exceptions. */
return simde_mm_move_sd(a, simde_mm_min_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
float64x2_t temp = vminq_f64(a_.neon_f64, b_.neon_f64);
/* Fix: ACLE has no generic vgetq_lane(); the typed vgetq_lane_f64()
 * must be used to re-insert a's upper lane. */
r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
r_.f64[0] = (a_.f64[0] < b_.f64[0]) ? a_.f64[0] : b_.f64[0];
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_sd(a, b) simde_mm_min_sd(a, b)
#endif
/* _mm_max_epi16: lane-wise maximum of signed 16-bit integers. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_max_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_max_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmaxq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = vec_max(a_.altivec_i16, b_.altivec_i16);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_epi16(a, b) simde_mm_max_epi16(a, b)
#endif
/* _mm_max_epu8: lane-wise maximum of unsigned 8-bit integers. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_max_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_max_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmaxq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_u8 = vec_max(a_.altivec_u8, b_.altivec_u8);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_epu8(a, b) simde_mm_max_epu8(a, b)
#endif
/* _mm_max_pd: lane-wise maximum of double-precision values.  The scalar
 * fallback `(a > b) ? a : b` matches x86 semantics: when either input is
 * NaN the second operand (b) is returned. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_max_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_max_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = vec_max(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vmaxq_f64(a_.neon_f64, b_.neon_f64);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = (a_.f64[i] > b_.f64[i]) ? a_.f64[i] : b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_pd(a, b) simde_mm_max_pd(a, b)
#endif
/* _mm_max_sd: lane 0 of the result is max(a[0], b[0]); lane 1 is copied
 * unchanged from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_max_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_max_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_max_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the (discarded) upper lane cannot raise
 * spurious floating-point exceptions. */
return simde_mm_move_sd(a, simde_mm_max_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
float64x2_t temp = vmaxq_f64(a_.neon_f64, b_.neon_f64);
/* Fix: ACLE has no generic vgetq_lane(); the typed vgetq_lane_f64()
 * must be used to re-insert a's upper lane. */
r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
r_.f64[0] = (a_.f64[0] > b_.f64[0]) ? a_.f64[0] : b_.f64[0];
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_sd(a, b) simde_mm_max_sd(a, b)
#endif
/* _mm_move_epi64: keep the low 64-bit integer lane of `a` and zero the
 * high lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_move_epi64 (simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_move_epi64(a);
#else
simde__m128i_private src_ = simde__m128i_to_private(a);
simde__m128i_private res_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Overwrite lane 1 with zero, leaving lane 0 intact. */
res_.neon_i64 = vsetq_lane_s64(0, src_.neon_i64, 1);
#else
res_.i64[1] = 0;
res_.i64[0] = src_.i64[0];
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_move_epi64(a) simde_mm_move_epi64(a)
#endif
/* _mm_mul_epu32: multiply the even-indexed unsigned 32-bit lanes (lanes 0
 * and 2) of a and b, producing two full 64-bit products. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mul_epu32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_mul_epu32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vmovn_u64 keeps the low 32 bits of each 64-bit lane, i.e. the
 * even-indexed u32 lanes; vmull widens the products to 64 bits. */
uint32x2_t a_lo = vmovn_u64(a_.neon_u64);
uint32x2_t b_lo = vmovn_u64(b_.neon_u64);
r_.neon_u64 = vmull_u32(a_lo, b_lo);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
/* Interleave with zeros so each u64 lane holds only the even u32 lane,
 * then multiply as 64-bit integers. */
__typeof__(a_.u32) z = { 0, };
a_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.u32, z, 0, 4, 2, 6);
b_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, b_.u32, z, 0, 4, 2, 6);
r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u32) *
HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), b_.u32);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i * 2]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i * 2]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_epu32(a, b) simde_mm_mul_epu32(a, b)
#endif
/* SIMDe extension: lane-wise multiplication of signed 64-bit integers
 * (no SSE2 equivalent exists). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_mul_epi64 (simde__m128i a, simde__m128i b) {
simde__m128i_private
lhs_ = simde__m128i_to_private(a),
rhs_ = simde__m128i_to_private(b),
out_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
out_.i64 = lhs_.i64 * rhs_.i64;
#else
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(out_.i64) / sizeof(out_.i64[0])) ; lane++) {
out_.i64[lane] = lhs_.i64[lane] * rhs_.i64[lane];
}
#endif
return simde__m128i_from_private(out_);
}
/* SIMDe extension: lane-wise remainder of signed 64-bit integers
 * (no SSE2 equivalent exists). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_mod_epi64 (simde__m128i a, simde__m128i b) {
simde__m128i_private
lhs_ = simde__m128i_to_private(a),
rhs_ = simde__m128i_to_private(b),
out_;
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && !defined(SIMDE_BUG_PGI_30104)
out_.i64 = lhs_.i64 % rhs_.i64;
#else
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(out_.i64) / sizeof(out_.i64[0])) ; lane++) {
out_.i64[lane] = lhs_.i64[lane] % rhs_.i64[lane];
}
#endif
return simde__m128i_from_private(out_);
}
/* _mm_mul_pd: lane-wise multiplication of double-precision values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mul_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_mul_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = a_.f64 * b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vmulq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_mul(a_.wasm_v128, b_.wasm_v128);
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f64[i] * b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_pd(a, b) simde_mm_mul_pd(a, b)
#endif
/* _mm_mul_sd: lane 0 of the result is a[0] * b[0]; lane 1 is copied
 * unchanged from `a`. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_mul_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_mul_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_mul_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast lane 0 first so the (discarded) upper lane cannot raise
 * spurious floating-point exceptions. */
return simde_mm_move_sd(a, simde_mm_mul_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
float64x2_t temp = vmulq_f64(a_.neon_f64, b_.neon_f64);
/* Fix: ACLE has no generic vgetq_lane(); the typed vgetq_lane_f64()
 * must be used to re-insert a's upper lane. */
r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
r_.f64[0] = a_.f64[0] * b_.f64[0];
r_.f64[1] = a_.f64[1];
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_sd(a, b) simde_mm_mul_sd(a, b)
#endif
/* _mm_mul_su32: multiply the low unsigned 32-bit lanes of a and b into a
 * full 64-bit product. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mul_su32 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
return _mm_mul_su32(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiply of the low u32 lanes; only the low 64-bit result
 * lane is kept. */
r_.u64[0] = vget_lane_u64(vget_low_u64(vmull_u32(vreinterpret_u32_s64(a_.neon_i64), vreinterpret_u32_s64(b_.neon_i64))), 0);
#else
r_.u64[0] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[0]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[0]);
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_su32(a, b) simde_mm_mul_su32(a, b)
#endif
/* _mm_mulhi_epi16: lane-wise signed 16x16 -> 32-bit multiply, keeping the
 * high 16 bits of each product. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mulhi_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_mulhi_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiplies of the low and high halves, then take the odd
 * (upper) 16-bit halves of each 32-bit product via unzip. */
int16x4_t a3210 = vget_low_s16(a_.neon_i16);
int16x4_t b3210 = vget_low_s16(b_.neon_i16);
int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
int32x4_t ab7654 = vmull_high_s16(a_.neon_i16, b_.neon_i16);
r_.neon_i16 = vuzp2q_s16(vreinterpretq_s16_s32(ab3210), vreinterpretq_s16_s32(ab7654));
#else
int16x4_t a7654 = vget_high_s16(a_.neon_i16);
int16x4_t b7654 = vget_high_s16(b_.neon_i16);
int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
uint16x8x2_t rv = vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
r_.neon_u16 = rv.val[1];
#endif
#else
/* Portable scalar fallback; unsigned shift avoids implementation-defined
 * right shift of negative values. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (HEDLEY_STATIC_CAST(uint32_t, HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i])) >> 16));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mulhi_epi16(a, b) simde_mm_mulhi_epi16(a, b)
#endif
/* _mm_mulhi_epu16: lane-wise unsigned 16x16 -> 32-bit multiply, keeping
 * the high 16 bits of each product. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mulhi_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
return _mm_mulhi_epu16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Widening multiplies of the low and high halves, then take the odd
 * (upper) 16-bit halves of each 32-bit product via unzip. */
uint16x4_t a3210 = vget_low_u16(a_.neon_u16);
uint16x4_t b3210 = vget_low_u16(b_.neon_u16);
uint32x4_t ab3210 = vmull_u16(a3210, b3210); /* 3333222211110000 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint32x4_t ab7654 = vmull_high_u16(a_.neon_u16, b_.neon_u16);
r_.neon_u16 = vuzp2q_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
#else
uint16x4_t a7654 = vget_high_u16(a_.neon_u16);
uint16x4_t b7654 = vget_high_u16(b_.neon_u16);
uint32x4_t ab7654 = vmull_u16(a7654, b7654); /* 7777666655554444 */
uint16x8x2_t neon_r = vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
r_.neon_u16 = neon_r.val[1];
#endif
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]) >> 16);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mulhi_epu16(a, b) simde_mm_mulhi_epu16(a, b)
#endif
/* _mm_mullo_epi16: lane-wise 16x16 multiply, keeping the low 16 bits of
 * each product (identical for signed and unsigned inputs).  The scalar
 * fallback multiplies as unsigned to avoid signed-overflow UB. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_mullo_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_mullo_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmulq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
/* Removed dead `(void) a_; (void) b_;` casts: both variables are used
 * on the very next line, so the suppressions were no-ops. */
r_.altivec_i16 = vec_mul(a_.altivec_i16, b_.altivec_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mullo_epi16(a, b) simde_mm_mullo_epi16(a, b)
#endif
/* _mm_or_pd: bitwise OR of two 128-bit vectors, operating on the raw bit
 * pattern of the double-precision lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_or_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_or_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vorrq_s64(a_.neon_i64, b_.neon_i64);
#else
/* Portable scalar fallback over the integer view of the bits. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] | b_.i32f[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_or_pd(a, b) simde_mm_or_pd(a, b)
#endif
/* _mm_or_si128: bitwise OR of two 128-bit integer vectors. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_or_si128 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_or_si128(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
/* Portable scalar fallback. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] | b_.i32f[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_or_si128(a, b) simde_mm_or_si128(a, b)
#endif
/* _mm_packs_epi16: narrow the sixteen signed 16-bit lanes of a (low half
 * of result) and b (high half) to signed 8-bit with saturation. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_packs_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_packs_epi16(a, b);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b),
r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i8 = vqmovn_high_s16(vqmovn_s16(a_.neon_i16), b_.neon_i16);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vcombine_s8(vqmovn_s16(a_.neon_i16), vqmovn_s16(b_.neon_i16));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i8 = vec_packs(a_.altivec_i16, b_.altivec_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_narrow_i16x8(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
/* Clamp to [INT8_MIN, INT8_MAX] with branch-free vector masks, then
 * narrow. */
int16_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
const int16_t SIMDE_VECTOR(32) min = { INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN, INT8_MIN };
const int16_t SIMDE_VECTOR(32) max = { INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX, INT8_MAX };
int16_t m SIMDE_VECTOR(32);
m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min);
v = (v & ~m) | (min & m);
/* Consistency fix: cast the comparison result like the `v < min` line
 * above (and like simde_mm_packs_epi32) rather than assigning it raw. */
m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v > max);
v = (v & ~m) | (max & m);
SIMDE_CONVERT_VECTOR_(r_.i8, v);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
/* Lanes 0-7 come from a, lanes 8-15 from b. */
int16_t v = (i < (sizeof(a_.i16) / sizeof(a_.i16[0]))) ? a_.i16[i] : b_.i16[i & 7];
r_.i8[i] = (v < INT8_MIN) ? INT8_MIN : ((v > INT8_MAX) ? INT8_MAX : HEDLEY_STATIC_CAST(int8_t, v));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packs_epi16(a, b) simde_mm_packs_epi16(a, b)
#endif
/* _mm_packs_epi32: narrow the eight signed 32-bit lanes of a (low half of
 * result) and b (high half) to signed 16-bit with saturation. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_packs_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_packs_epi32(a, b);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b),
r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i16 = vqmovn_high_s32(vqmovn_s32(a_.neon_i32), b_.neon_i32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vcombine_s16(vqmovn_s32(a_.neon_i32), vqmovn_s32(b_.neon_i32));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i16 = vec_packs(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_X86_SSE2_NATIVE)
r_.sse_m128i = _mm_packs_epi32(a_.sse_m128i, b_.sse_m128i);
#elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector)
/* Clamp to [INT16_MIN, INT16_MAX] with branch-free vector masks, then
 * narrow. */
int32_t SIMDE_VECTOR(32) v = SIMDE_SHUFFLE_VECTOR_(32, 32, a_.i32, b_.i32, 0, 1, 2, 3, 4, 5, 6, 7);
const int32_t SIMDE_VECTOR(32) min = { INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN, INT16_MIN };
const int32_t SIMDE_VECTOR(32) max = { INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX, INT16_MAX };
int32_t m SIMDE_VECTOR(32);
m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v < min);
v = (v & ~m) | (min & m);
m = HEDLEY_REINTERPRET_CAST(__typeof__(m), v > max);
v = (v & ~m) | (max & m);
SIMDE_CONVERT_VECTOR_(r_.i16, v);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
/* Lanes 0-3 come from a, lanes 4-7 from b. */
int32_t v = (i < (sizeof(a_.i32) / sizeof(a_.i32[0]))) ? a_.i32[i] : b_.i32[i & 3];
r_.i16[i] = (v < INT16_MIN) ? INT16_MIN : ((v > INT16_MAX) ? INT16_MAX : HEDLEY_STATIC_CAST(int16_t, v));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packs_epi32(a, b) simde_mm_packs_epi32(a, b)
#endif
/* _mm_packus_epi16: narrow the sixteen signed 16-bit lanes of a (low half
 * of result) and b (high half) to unsigned 8-bit with saturation
 * (negative values clamp to 0). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_packus_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_packus_epi16(a, b);
#else
simde__m128i_private
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b),
r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
#if defined(SIMDE_BUG_CLANG_46840)
/* Workaround for a clang bug in vqmovun_high_s16's first-argument type. */
r_.neon_u8 = vqmovun_high_s16(vreinterpret_s8_u8(vqmovun_s16(a_.neon_i16)), b_.neon_i16);
#else
r_.neon_u8 = vqmovun_high_s16(vqmovun_s16(a_.neon_i16), b_.neon_i16);
#endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 =
vcombine_u8(
vqmovun_s16(a_.neon_i16),
vqmovun_s16(b_.neon_i16)
);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u8 = vec_packsu(a_.altivec_i16, b_.altivec_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_narrow_i16x8(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_CONVERT_VECTOR_) && HEDLEY_HAS_BUILTIN(__builtin_shufflevector) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
int16_t v SIMDE_VECTOR(32) = SIMDE_SHUFFLE_VECTOR_(16, 32, a_.i16, b_.i16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
/* Zero negative lanes (sign-bit mask), then force lanes above UINT8_MAX
 * to all-ones before narrowing. */
v &= ~(v >> 15);
v |= HEDLEY_REINTERPRET_CAST(__typeof__(v), v > UINT8_MAX);
SIMDE_CONVERT_VECTOR_(r_.i8, v);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
/* Lanes 0-7 come from a, lanes 8-15 from b. */
int16_t v = (i < (sizeof(a_.i16) / sizeof(a_.i16[0]))) ? a_.i16[i] : b_.i16[i & 7];
r_.u8[i] = (v < 0) ? UINT8_C(0) : ((v > UINT8_MAX) ? UINT8_MAX : HEDLEY_STATIC_CAST(uint8_t, v));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packus_epi16(a, b) simde_mm_packus_epi16(a, b)
#endif
/* _mm_pause: spin-wait hint to the processor.  A no-op when the native
 * instruction is unavailable, which is a valid implementation since
 * PAUSE is purely a performance/power hint. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_pause (void) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_pause();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_pause() (simde_mm_pause())
#endif
/* _mm_sad_epu8: sum of absolute differences of unsigned 8-bit lanes.
 * Each 64-bit result lane holds the SAD of the corresponding eight-byte
 * group of a and b. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sad_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sad_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Absolute differences, then pairwise-add down to one sum per half. */
const uint16x8_t t = vpaddlq_u8(vabdq_u8(a_.neon_u8, b_.neon_u8));
r_.neon_u64 = vcombine_u64(
vpaddl_u32(vpaddl_u16(vget_low_u16(t))),
vpaddl_u32(vpaddl_u16(vget_high_u16(t))));
#else
/* Portable fallback: accumulate |a - b| over each eight-byte group.
 * Max possible sum is 8 * 255 = 2040, so uint16_t cannot overflow. */
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
uint16_t tmp = 0;
SIMDE_VECTORIZE_REDUCTION(+:tmp)
for (size_t j = 0 ; j < ((sizeof(r_.u8) / sizeof(r_.u8[0])) / 2) ; j++) {
const size_t e = j + (i * 8);
tmp += (a_.u8[e] > b_.u8[e]) ? (a_.u8[e] - b_.u8[e]) : (b_.u8[e] - a_.u8[e]);
}
r_.i64[i] = tmp;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sad_epu8(a, b) simde_mm_sad_epu8(a, b)
#endif
/* _mm_set_epi8: build a vector from sixteen 8-bit values.  Note the
 * Intel convention: e15 is the HIGHEST lane, e0 the lowest. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set_epi8 (int8_t e15, int8_t e14, int8_t e13, int8_t e12,
int8_t e11, int8_t e10, int8_t e9, int8_t e8,
int8_t e7, int8_t e6, int8_t e5, int8_t e4,
int8_t e3, int8_t e2, int8_t e1, int8_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set_epi8(
e15, e14, e13, e12, e11, e10, e9, e8,
e7, e6, e5, e4, e3, e2, e1, e0);
#else
simde__m128i_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
/* wasm_i8x16_make takes lanes in ascending order, hence e0 first. */
r_.wasm_v128 = wasm_i8x16_make(
e0, e1, e2, e3, e4, e5, e6, e7,
e8, e9, e10, e11, e12, e13, e14, e15);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_s8 loads in memory (ascending-lane) order. */
SIMDE_ALIGN_LIKE_16(int8x16_t) int8_t data[16] = {
e0, e1, e2, e3,
e4, e5, e6, e7,
e8, e9, e10, e11,
e12, e13, e14, e15};
r_.neon_i8 = vld1q_s8(data);
#else
r_.i8[ 0] = e0;
r_.i8[ 1] = e1;
r_.i8[ 2] = e2;
r_.i8[ 3] = e3;
r_.i8[ 4] = e4;
r_.i8[ 5] = e5;
r_.i8[ 6] = e6;
r_.i8[ 7] = e7;
r_.i8[ 8] = e8;
r_.i8[ 9] = e9;
r_.i8[10] = e10;
r_.i8[11] = e11;
r_.i8[12] = e12;
r_.i8[13] = e13;
r_.i8[14] = e14;
r_.i8[15] = e15;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_set_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* _mm_set_epi16: build a vector from eight 16-bit values.  Intel
 * convention: e7 is the highest lane, e0 the lowest. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set_epi16 (int16_t e7, int16_t e6, int16_t e5, int16_t e4,
int16_t e3, int16_t e2, int16_t e1, int16_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_s16 loads in memory (ascending-lane) order. */
SIMDE_ALIGN_LIKE_16(int16x8_t) int16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 };
r_.neon_i16 = vld1q_s16(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_make(e0, e1, e2, e3, e4, e5, e6, e7);
#else
r_.i16[0] = e0;
r_.i16[1] = e1;
r_.i16[2] = e2;
r_.i16[3] = e3;
r_.i16[4] = e4;
r_.i16[5] = e5;
r_.i16[6] = e6;
r_.i16[7] = e7;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* _mm_loadu_si16: load 16 bits from an unaligned address into lane 0,
 * zeroing the rest of the vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_si16 (void const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
return _mm_loadu_si16(mem_addr);
#else
/* memcpy is the portable way to read unaligned memory without UB. */
int16_t v;
simde_memcpy(&v, mem_addr, sizeof(v));
return simde_x_mm_cvtsi16_si128(v);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si16(mem_addr) simde_mm_loadu_si16(mem_addr)
#endif
/* _mm_set_epi32: build a vector from four 32-bit values.  Intel
 * convention: e3 is the highest lane, e0 the lowest. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set_epi32 (int32_t e3, int32_t e2, int32_t e1, int32_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set_epi32(e3, e2, e1, e0);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_s32 loads in memory (ascending-lane) order. */
SIMDE_ALIGN_LIKE_16(int32x4_t) int32_t data[4] = { e0, e1, e2, e3 };
r_.neon_i32 = vld1q_s32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_make(e0, e1, e2, e3);
#else
r_.i32[0] = e0;
r_.i32[1] = e1;
r_.i32[2] = e2;
r_.i32[3] = e3;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi32(e3, e2, e1, e0) simde_mm_set_epi32(e3, e2, e1, e0)
#endif
/* _mm_loadu_si32: load 32 bits from an unaligned address into lane 0,
 * zeroing the rest of the vector. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_si32 (void const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
return _mm_loadu_si32(mem_addr);
#else
/* memcpy is the portable way to read unaligned memory without UB. */
int32_t v;
simde_memcpy(&v, mem_addr, sizeof(v));
return simde_mm_cvtsi32_si128(v);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si32(mem_addr) simde_mm_loadu_si32(mem_addr)
#endif
/* _mm_set_epi64: build a 128-bit vector from two __m64 halves; e1 becomes
 * the high lane, e0 the low lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set_epi64 (simde__m64 e1, simde__m64 e0) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_set_epi64(e1, e0);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vcombine places its first argument in the low half. */
r_.neon_i64 = vcombine_s64(simde__m64_to_neon_i64(e0), simde__m64_to_neon_i64(e1));
#else
r_.m64[0] = e0;
r_.m64[1] = e1;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi64(e1, e0) (simde_mm_set_epi64((e1), (e0)))
#endif
/* _mm_set_epi64x: build a vector from two 64-bit integers; e1 becomes the
 * high lane, e0 the low lane.  The native path is skipped on MSVC older
 * than 2015, which lacks this intrinsic in 32-bit builds. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set_epi64x (int64_t e1, int64_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0))
return _mm_set_epi64x(e1, e0);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_s64 loads in memory (ascending-lane) order. */
SIMDE_ALIGN_LIKE_16(int64x2_t) int64_t data[2] = {e0, e1};
r_.neon_i64 = vld1q_s64(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_make(e0, e1);
#else
r_.i64[0] = e0;
r_.i64[1] = e1;
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi64x(e1, e0) simde_mm_set_epi64x(e1, e0)
#endif
/* _mm_loadu_si64: load 64 bits from an unaligned address into lane 0,
 * zeroing the upper lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_loadu_si64 (void const* mem_addr) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_GCC_VERSION_CHECK(11,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
return _mm_loadu_si64(mem_addr);
#else
/* memcpy is the portable way to read unaligned memory without UB. */
int64_t v;
simde_memcpy(&v, mem_addr, sizeof(v));
return simde_mm_cvtsi64_si128(v);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si64(mem_addr) simde_mm_loadu_si64(mem_addr)
#endif
/* SIMDe extension: unsigned counterpart of _mm_set_epi8.  e15 is the
 * highest lane, e0 the lowest. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set_epu8 (uint8_t e15, uint8_t e14, uint8_t e13, uint8_t e12,
uint8_t e11, uint8_t e10, uint8_t e9, uint8_t e8,
uint8_t e7, uint8_t e6, uint8_t e5, uint8_t e4,
uint8_t e3, uint8_t e2, uint8_t e1, uint8_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Reuse the signed intrinsic; the bit pattern is what matters. */
return _mm_set_epi8(
HEDLEY_STATIC_CAST(char, e15), HEDLEY_STATIC_CAST(char, e14), HEDLEY_STATIC_CAST(char, e13), HEDLEY_STATIC_CAST(char, e12),
HEDLEY_STATIC_CAST(char, e11), HEDLEY_STATIC_CAST(char, e10), HEDLEY_STATIC_CAST(char, e9), HEDLEY_STATIC_CAST(char, e8),
HEDLEY_STATIC_CAST(char, e7), HEDLEY_STATIC_CAST(char, e6), HEDLEY_STATIC_CAST(char, e5), HEDLEY_STATIC_CAST(char, e4),
HEDLEY_STATIC_CAST(char, e3), HEDLEY_STATIC_CAST(char, e2), HEDLEY_STATIC_CAST(char, e1), HEDLEY_STATIC_CAST(char, e0));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vld1q_u8 loads in memory (ascending-lane) order. */
SIMDE_ALIGN_LIKE_16(uint8x16_t) uint8_t data[16] = {
e0, e1, e2, e3,
e4, e5, e6, e7,
e8, e9, e10, e11,
e12, e13, e14, e15};
r_.neon_u8 = vld1q_u8(data);
#else
r_.u8[ 0] = e0; r_.u8[ 1] = e1; r_.u8[ 2] = e2; r_.u8[ 3] = e3;
r_.u8[ 4] = e4; r_.u8[ 5] = e5; r_.u8[ 6] = e6; r_.u8[ 7] = e7;
r_.u8[ 8] = e8; r_.u8[ 9] = e9; r_.u8[10] = e10; r_.u8[11] = e11;
r_.u8[12] = e12; r_.u8[13] = e13; r_.u8[14] = e14; r_.u8[15] = e15;
#endif
return simde__m128i_from_private(r_);
#endif
}
/* SIMDe extension: build a __m128i from eight uint16_t values,
 * e0 in lane 0 (lowest) through e7 in lane 7. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set_epu16 (uint16_t e7, uint16_t e6, uint16_t e5, uint16_t e4,
uint16_t e3, uint16_t e2, uint16_t e1, uint16_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Reuse the signed intrinsic; the bit pattern is what matters. */
return _mm_set_epi16(
HEDLEY_STATIC_CAST(short, e7), HEDLEY_STATIC_CAST(short, e6), HEDLEY_STATIC_CAST(short, e5), HEDLEY_STATIC_CAST(short, e4),
HEDLEY_STATIC_CAST(short, e3), HEDLEY_STATIC_CAST(short, e2), HEDLEY_STATIC_CAST(short, e1), HEDLEY_STATIC_CAST(short, e0));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_LIKE_16(uint16x8_t) uint16_t data[8] = { e0, e1, e2, e3, e4, e5, e6, e7 };
r_.neon_u16 = vld1q_u16(data);
#else
r_.u16[0] = e0; r_.u16[1] = e1; r_.u16[2] = e2; r_.u16[3] = e3;
r_.u16[4] = e4; r_.u16[5] = e5; r_.u16[6] = e6; r_.u16[7] = e7;
#endif
return simde__m128i_from_private(r_);
#endif
}
/* SIMDe extension: build a __m128i from four uint32_t values,
 * e0 in lane 0 (lowest) through e3 in lane 3. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set_epu32 (uint32_t e3, uint32_t e2, uint32_t e1, uint32_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Reuse the signed intrinsic; the bit pattern is what matters. */
return _mm_set_epi32(
HEDLEY_STATIC_CAST(int, e3), HEDLEY_STATIC_CAST(int, e2), HEDLEY_STATIC_CAST(int, e1), HEDLEY_STATIC_CAST(int, e0));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_LIKE_16(uint32x4_t) uint32_t data[4] = { e0, e1, e2, e3 };
r_.neon_u32 = vld1q_u32(data);
#else
r_.u32[0] = e0;
r_.u32[1] = e1;
r_.u32[2] = e2;
r_.u32[3] = e3;
#endif
return simde__m128i_from_private(r_);
#endif
}
/* SIMDe extension: build a __m128i from two uint64_t values
 * (e1 = high lane, e0 = low lane).  Native path skipped on old MSVC,
 * which lacks _mm_set_epi64x in 32-bit builds. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set_epu64x (uint64_t e1, uint64_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0))
return _mm_set_epi64x(HEDLEY_STATIC_CAST(int64_t, e1), HEDLEY_STATIC_CAST(int64_t, e0));
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_LIKE_16(uint64x2_t) uint64_t data[2] = {e0, e1};
r_.neon_u64 = vld1q_u64(data);
#else
r_.u64[0] = e0;
r_.u64[1] = e1;
#endif
return simde__m128i_from_private(r_);
#endif
}
/* Portable _mm_set_sd: __m128d with `a` in the low double and 0.0 in
 * the high double. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_set_sd (simde_float64 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set_sd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
return vsetq_lane_f64(a, vdupq_n_f64(SIMDE_FLOAT64_C(0.0)), 0);
#else
/* simde_mm_set_pd takes (high, low), so `a` is the second argument. */
return simde_mm_set_pd(SIMDE_FLOAT64_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_sd(a) simde_mm_set_sd(a)
#endif
/* Portable _mm_set1_epi8: broadcast one int8_t into all 16 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set1_epi8 (int8_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set1_epi8(a);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vdupq_n_s8(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i8 = vec_splats(HEDLEY_STATIC_CAST(signed char, a));
#else
/* Scalar fallback: the loop typically auto-vectorizes via SIMDE_VECTORIZE. */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = a;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi8(a) simde_mm_set1_epi8(a)
#endif
/* Portable _mm_set1_epi16: broadcast one int16_t into all 8 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set1_epi16 (int16_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set1_epi16(a);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vdupq_n_s16(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i16 = vec_splats(HEDLEY_STATIC_CAST(signed short, a));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi16(a) simde_mm_set1_epi16(a)
#endif
/* Portable _mm_set1_epi32: broadcast one int32_t into all 4 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set1_epi32 (int32_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_set1_epi32(a);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vdupq_n_s32(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, a));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi32(a) simde_mm_set1_epi32(a)
#endif
/* Portable _mm_set1_epi64x: broadcast one int64_t into both lanes.
 * Native path skipped on old MSVC (no 32-bit _mm_set1_epi64x).
 * Note: the AltiVec branch requires P7 (64-bit element splats). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set1_epi64x (int64_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && (!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19,0,0))
return _mm_set1_epi64x(a);
#else
simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vdupq_n_s64(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i64 = vec_splats(HEDLEY_STATIC_CAST(signed long long, a))
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi64x(a) simde_mm_set1_epi64x(a)
#endif
/* Portable _mm_set1_epi64: broadcast a __m64 into both 64-bit lanes by
 * extracting its scalar value and delegating to simde_mm_set1_epi64x. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_set1_epi64 (simde__m64 a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_set1_epi64(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
return simde_mm_set1_epi64x(a_.i64[0]);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi64(a) simde_mm_set1_epi64(a)
#endif
/* SIMDe extension: broadcast a uint8_t into all 16 lanes.  Delegates to
 * the signed splat; the bit pattern is identical. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set1_epu8 (uint8_t value) {
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return simde__m128i_from_altivec_u8(vec_splats(HEDLEY_STATIC_CAST(unsigned char, value)));
#else
return simde_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, value));
#endif
}
/* SIMDe extension: broadcast a uint16_t into all 8 lanes (bit-identical
 * to the signed splat). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set1_epu16 (uint16_t value) {
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return simde__m128i_from_altivec_u16(vec_splats(HEDLEY_STATIC_CAST(unsigned short, value)));
#else
return simde_mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, value));
#endif
}
/* SIMDe extension: broadcast a uint32_t into all 4 lanes (bit-identical
 * to the signed splat). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set1_epu32 (uint32_t value) {
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return simde__m128i_from_altivec_u32(vec_splats(HEDLEY_STATIC_CAST(unsigned int, value)));
#else
return simde_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, value));
#endif
}
/* SIMDe extension: broadcast a uint64_t into both lanes (bit-identical
 * to the signed splat; AltiVec needs P7 for 64-bit splats). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_set1_epu64 (uint64_t value) {
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
return simde__m128i_from_altivec_u64(vec_splats(HEDLEY_STATIC_CAST(unsigned long long, value)))
#else
return simde_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, value));
#endif
}
/* Portable _mm_setr_epi8: like _mm_set_epi8 but arguments in reverse
 * (memory) order — e15 goes into lane 0.  Implemented by delegating to
 * simde_mm_set_epi8 with the argument list flipped. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_setr_epi8 (int8_t e15, int8_t e14, int8_t e13, int8_t e12,
int8_t e11, int8_t e10, int8_t e9, int8_t e8,
int8_t e7, int8_t e6, int8_t e5, int8_t e4,
int8_t e3, int8_t e2, int8_t e1, int8_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setr_epi8(
e15, e14, e13, e12, e11, e10, e9, e8,
e7, e6, e5, e4, e3, e2, e1, e0);
#else
return simde_mm_set_epi8(
e0, e1, e2, e3, e4, e5, e6, e7,
e8, e9, e10, e11, e12, e13, e14, e15);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_setr_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* Portable _mm_setr_epi16: reverse-order set — e7 goes into lane 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_setr_epi16 (int16_t e7, int16_t e6, int16_t e5, int16_t e4,
int16_t e3, int16_t e2, int16_t e1, int16_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0);
#else
return simde_mm_set_epi16(e0, e1, e2, e3, e4, e5, e6, e7);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0) simde_mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
/* Portable _mm_setr_epi32: reverse-order set — e3 goes into lane 0. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_setr_epi32 (int32_t e3, int32_t e2, int32_t e1, int32_t e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setr_epi32(e3, e2, e1, e0);
#else
return simde_mm_set_epi32(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi32(e3, e2, e1, e0) simde_mm_setr_epi32(e3, e2, e1, e0)
#endif
/* Portable _mm_setr_epi64: reverse-order set — e1 goes into the low
 * lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_setr_epi64 (simde__m64 e1, simde__m64 e0) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_setr_epi64(e1, e0);
#else
return simde_mm_set_epi64(e0, e1);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi64(e1, e0) (simde_mm_setr_epi64((e1), (e0)))
#endif
/* Portable _mm_setr_pd: reverse-order set — e1 goes into the low
 * double. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_setr_pd (simde_float64 e1, simde_float64 e0) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setr_pd(e1, e0);
#else
return simde_mm_set_pd(e0, e1);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_pd(e1, e0) simde_mm_setr_pd(e1, e0)
#endif
/* Portable _mm_setzero_pd: all-zero __m128d.  An all-zero bit pattern
 * is +0.0 in both doubles, so reinterpreting a zero integer vector is
 * sufficient. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_setzero_pd (void) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_setzero_pd();
#else
return simde_mm_castsi128_pd(simde_mm_setzero_si128());
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setzero_pd() simde_mm_setzero_pd()
#endif
/* Portable _mm_undefined_pd: a __m128d with unspecified contents.
 * When the compiler can suppress uninitialized-variable warnings, r_
 * is deliberately left uninitialized (matching native semantics);
 * otherwise it is zeroed so no diagnostic fires. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_undefined_pd (void) {
simde__m128d_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE__HAVE_UNDEFINED128)
r_.n = _mm_undefined_pd();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128d_to_private(simde_mm_setzero_pd());
#endif
return simde__m128d_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_undefined_pd() simde_mm_undefined_pd()
#endif
/* Portable _mm_undefined_si128: a __m128i with unspecified contents;
 * zero-filled only when uninitialized-variable warnings cannot be
 * suppressed (same scheme as simde_mm_undefined_pd above). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_undefined_si128 (void) {
simde__m128i_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE__HAVE_UNDEFINED128)
r_.n = _mm_undefined_si128();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128i_to_private(simde_mm_setzero_si128());
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_undefined_si128() (simde_mm_undefined_si128())
#endif
/* Close the diagnostic suppression opened before simde_mm_undefined_pd. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
/* SIMDe extension: __m128d with every bit set (NOT 1.0 — an all-ones
 * bit pattern, typically used as a mask). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_setone_pd (void) {
return simde_mm_castps_pd(simde_x_mm_setone_ps());
}
/* SIMDe extension: __m128i with every bit set (all-ones mask). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_setone_si128 (void) {
return simde_mm_castps_si128(simde_x_mm_setone_ps());
}
/* Portable _mm_shuffle_epi32: select each output 32-bit lane from one
 * of a's lanes; bits 2i..2i+1 of imm8 pick the source lane for output
 * lane i.  The function below is the generic fallback; on capable
 * targets it is shadowed by the macros that follow so imm8 stays a
 * compile-time constant (required by the native intrinsic and
 * SIMDE_SHUFFLE_VECTOR_). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shuffle_epi32 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[(imm8 >> (i * 2)) & 3];
}
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shuffle_epi32(a, imm8) _mm_shuffle_epi32((a), (imm8))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_epi32(a, imm8) (__extension__ ({ \
const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \
simde__m128i_from_private((simde__m128i_private) { .i32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
(simde__tmp_a_).i32, \
(simde__tmp_a_).i32, \
((imm8) ) & 3, \
((imm8) >> 2) & 3, \
((imm8) >> 4) & 3, \
((imm8) >> 6) & 3) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_shuffle_epi32(a, imm8) \
(__extension__ ({ \
const int32x4_t simde_mm_shuffle_epi32_a_ = simde__m128i_to_neon_i32(a); \
int32x4_t simde_mm_shuffle_epi32_r_; \
simde_mm_shuffle_epi32_r_ = vmovq_n_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, (imm8) & (0x3))); \
simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 2) & 0x3), simde_mm_shuffle_epi32_r_, 1); \
simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 4) & 0x3), simde_mm_shuffle_epi32_r_, 2); \
simde_mm_shuffle_epi32_r_ = vsetq_lane_s32(vgetq_lane_s32(simde_mm_shuffle_epi32_a_, ((imm8) >> 6) & 0x3), simde_mm_shuffle_epi32_r_, 3); \
vreinterpretq_s64_s32(simde_mm_shuffle_epi32_r_); \
}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_epi32(a, imm8) simde_mm_shuffle_epi32(a, imm8)
#endif
/* Portable _mm_shuffle_pd: output low double comes from a (lane chosen
 * by imm8 bit 0), output high double from b (lane chosen by bit 1).
 * Shadowed by macros below when imm8 can be used as a compile-time
 * constant; the native macro is skipped on PGI, which miscompiles it. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_shuffle_pd (simde__m128d a, simde__m128d b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.f64[0] = ((imm8 & 1) == 0) ? a_.f64[0] : a_.f64[1];
r_.f64[1] = ((imm8 & 2) == 0) ? b_.f64[0] : b_.f64[1];
return simde__m128d_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_shuffle_pd(a, b, imm8) _mm_shuffle_pd((a), (b), (imm8))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_pd(a, b, imm8) (__extension__ ({ \
simde__m128d_from_private((simde__m128d_private) { .f64 = \
SIMDE_SHUFFLE_VECTOR_(64, 16, \
simde__m128d_to_private(a).f64, \
simde__m128d_to_private(b).f64, \
(((imm8) ) & 1), \
(((imm8) >> 1) & 1) + 2) }); }))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_pd(a, b, imm8) simde_mm_shuffle_pd(a, b, imm8)
#endif
/* Portable _mm_shufflehi_epi16: lanes 0-3 are copied from a unchanged;
 * lanes 4-7 are selected from a's lanes 4-7 by two-bit fields of imm8.
 * Shadowed by the macros below where imm8 can stay a compile-time
 * constant. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i++) {
r_.i16[i] = a_.i16[i];
}
/* (i - 4) maps output lanes 4..7 onto imm8's four 2-bit selectors;
 * the +4 keeps the source within the high half. */
for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[((imm8 >> ((i - 4) * 2)) & 3) + 4];
}
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shufflehi_epi16(a, imm8) _mm_shufflehi_epi16((a), (imm8))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shufflehi_epi16(a, imm8) (__extension__ ({ \
const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \
simde__m128i_from_private((simde__m128i_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 16, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
0, 1, 2, 3, \
(((imm8) ) & 3) + 4, \
(((imm8) >> 2) & 3) + 4, \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_shufflehi_epi16(a, imm8) \
(__extension__ ({ \
int16x8_t simde_mm_shufflehi_epi16_a_ = simde__m128i_to_neon_i16(a); \
int16x8_t simde_mm_shufflehi_epi16_r_ = simde_mm_shufflehi_epi16_a_; \
simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) ) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 4); \
simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 2) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 5); \
simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 4) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 6); \
simde_mm_shufflehi_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflehi_epi16_a_, (((imm8) >> 6) & 0x3) + 4), simde_mm_shufflehi_epi16_r_, 7); \
simde__m128i_from_neon_i16(simde_mm_shufflehi_epi16_r_); \
}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shufflehi_epi16(a, imm8) simde_mm_shufflehi_epi16(a, imm8)
#endif
/* Portable _mm_shufflelo_epi16: lanes 0-3 are selected from a's lanes
 * 0-3 by two-bit fields of imm8; lanes 4-7 are copied unchanged
 * (mirror image of shufflehi above). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
for (size_t i = 0 ; i < ((sizeof(r_.i16) / sizeof(r_.i16[0])) / 2) ; i++) {
r_.i16[i] = a_.i16[((imm8 >> (i * 2)) & 3)];
}
SIMDE_VECTORIZE
for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i];
}
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shufflelo_epi16(a, imm8) _mm_shufflelo_epi16((a), (imm8))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shufflelo_epi16(a, imm8) (__extension__ ({ \
const simde__m128i_private simde__tmp_a_ = simde__m128i_to_private(a); \
simde__m128i_from_private((simde__m128i_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 16, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3), \
4, 5, 6, 7) }); }))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_STATEMENT_EXPR_)
#define simde_mm_shufflelo_epi16(a, imm8) \
(__extension__({ \
int16x8_t simde_mm_shufflelo_epi16_a_ = simde__m128i_to_neon_i16(a); \
int16x8_t simde_mm_shufflelo_epi16_r_ = simde_mm_shufflelo_epi16_a_; \
simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) ) & 0x3)), simde_mm_shufflelo_epi16_r_, 0); \
simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 2) & 0x3)), simde_mm_shufflelo_epi16_r_, 1); \
simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 4) & 0x3)), simde_mm_shufflelo_epi16_r_, 2); \
simde_mm_shufflelo_epi16_r_ = vsetq_lane_s16(vgetq_lane_s16(simde_mm_shufflelo_epi16_a_, (((imm8) >> 6) & 0x3)), simde_mm_shufflelo_epi16_r_, 3); \
simde__m128i_from_neon_i16(simde_mm_shufflelo_epi16_r_); \
}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shufflelo_epi16(a, imm8) simde_mm_shufflelo_epi16(a, imm8)
#endif
/* Portable _mm_sll_epi16: shift each 16-bit lane left by the unsigned
 * 64-bit count in count's low lane; counts above 15 zero the result
 * (handled by the early return, which also keeps the later shifts
 * within defined range). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sll_epi16 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sll_epi16(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 15)
return simde_mm_setzero_si128();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = (a_.u16 << count_.u64[0]);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vshlq_u16(a_.neon_u16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, count_.u64[0])));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = ((wasm_i64x2_extract_lane(count_.wasm_v128, 0) < 16) ? wasm_i16x8_shl(a_.wasm_v128, HEDLEY_STATIC_CAST(int32_t, wasm_i64x2_extract_lane(count_.wasm_v128, 0))) : wasm_i16x8_const(0,0,0,0,0,0,0,0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, (a_.u16[i] << count_.u64[0]));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi16(a, count) simde_mm_sll_epi16((a), (count))
#endif
/* Portable _mm_sll_epi32: shift each 32-bit lane left by count's low
 * 64-bit unsigned lane; counts above 31 zero the result via the early
 * return. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sll_epi32 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sll_epi32(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 31)
return simde_mm_setzero_si128();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = (a_.u32 << count_.u64[0]);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vshlq_u32(a_.neon_u32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, count_.u64[0])));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = ((wasm_i64x2_extract_lane(count_.wasm_v128, 0) < 32) ? wasm_i32x4_shl(a_.wasm_v128, HEDLEY_STATIC_CAST(int32_t, wasm_i64x2_extract_lane(count_.wasm_v128, 0))) : wasm_i32x4_const(0,0,0,0));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, (a_.u32[i] << count_.u64[0]));
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi32(a, count) (simde_mm_sll_epi32(a, (count)))
#endif
/* Portable _mm_sll_epi64: shift both 64-bit lanes left by count's low
 * 64-bit unsigned lane; counts above 63 zero the result via the early
 * return. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sll_epi64 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sll_epi64(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 63)
return simde_mm_setzero_si128();
const int_fast16_t s = HEDLEY_STATIC_CAST(int_fast16_t, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, s)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = (s < 64) ? wasm_i64x2_shl(a_.wasm_v128, s) : wasm_i64x2_const(0,0);
#else
/* GCC bug 94488 miscompiles vectorized 64-bit shifts; skip the pragma. */
#if !defined(SIMDE_BUG_GCC_94488)
SIMDE_VECTORIZE
#endif
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = a_.u64[i] << s;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi64(a, count) (simde_mm_sll_epi64(a, (count)))
#endif
/* Portable _mm_sqrt_pd: element-wise square root of both doubles. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_sqrt_pd (simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sqrt_pd(a);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsqrtq_f64(a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f64 = vec_sqrt(a_.altivec_f64);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = simde_math_sqrt(a_.f64[i]);
}
#else
/* No sqrt available at all on this target. */
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_pd(a) simde_mm_sqrt_pd(a)
#endif
/* Portable _mm_sqrt_sd: low double = sqrt(b[0]), high double = a[1].
 * Without SIMDE_FAST_EXCEPTIONS, b's upper lane is first broadcast
 * from its lower lane so a spurious FP exception can't be raised by
 * the unused element. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_sqrt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sqrt_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_sqrt_pd(b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_sd(a, simde_mm_sqrt_pd(simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(simde_math_sqrt)
r_.f64[0] = simde_math_sqrt(b_.f64[0]);
r_.f64[1] = a_.f64[1];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_sd(a, b) simde_mm_sqrt_sd(a, b)
#endif
/* Portable _mm_srl_epi16: logical right shift of each 16-bit lane by
 * the unsigned 64-bit count in count's low lane; counts above 15 zero
 * the result.
 *
 * Fix: the count was previously read through i64[0] and clamped with a
 * signed comparison, so a count with the high bit set (a huge unsigned
 * value, which must zero the result) produced a *negative* shift count
 * — undefined behavior in the scalar loop and wrong on NEON.  Compare
 * u64[0] and return zero outright, matching simde_mm_sll_epi16. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_srl_epi16 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_srl_epi16(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 15)
return simde_mm_setzero_si128();
const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_u16 = vshlq_u16(a_.neon_u16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = a_.u16[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi16(a, count) (simde_mm_srl_epi16(a, (count)))
#endif
/* Portable _mm_srl_epi32: logical right shift of each 32-bit lane by
 * the unsigned 64-bit count in count's low lane; counts above 31 zero
 * the result.
 *
 * Fix: the count was previously clamped to 32 (and via signed i64[0]),
 * so a count of 32+ reached `a_.u32[i] >> 32` — shifting a 32-bit
 * value by its full width is undefined behavior in C — and a count
 * with the high bit set became a negative shift.  Compare u64[0] and
 * return zero outright, matching simde_mm_sll_epi32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_srl_epi32 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_srl_epi32(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 31)
return simde_mm_setzero_si128();
const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_u32 = vshlq_u32(a_.neon_u32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u32x4_shr(a_.wasm_v128, cnt);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi32(a, count) (simde_mm_srl_epi32(a, (count)))
#endif
/* Portable _mm_srl_epi64: logical right shift of both 64-bit lanes by
 * the unsigned 64-bit count in count's low lane; counts above 63 zero
 * the result.
 *
 * Fix: the count was previously clamped to 64 (and via signed i64[0]),
 * so a count of 64+ reached `a_.u64[i] >> 64` — shifting a 64-bit
 * value by its full width is undefined behavior in C — and a count
 * with the high bit set became a negative shift.  Compare u64[0] and
 * return zero outright, matching simde_mm_sll_epi64. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_srl_epi64 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_srl_epi64(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
if (count_.u64[0] > 63)
return simde_mm_setzero_si128();
const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u64x2_shr(a_.wasm_v128, cnt);
#else
/* GCC bug 94488 miscompiles vectorized 64-bit shifts; skip the pragma. */
#if !defined(SIMDE_BUG_GCC_94488)
SIMDE_VECTORIZE
#endif
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = a_.u64[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi64(a, count) (simde_mm_srl_epi64(a, (count)))
#endif
/* Portable _mm_srai_epi16: arithmetic right shift of each 16-bit lane
 * by an immediate.  Counts of 16+ are clamped to 15, which fills every
 * bit with the sign bit — the behavior the intrinsic specifies. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_srai_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
/* MSVC requires a range of (0, 255). */
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
const int cnt = (imm8 & ~15) ? 15 : imm8;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srai_epi16(a, imm8) _mm_srai_epi16((a), (imm8))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srai_epi16(a, imm8) simde_mm_srai_epi16(a, imm8)
#endif
/* Portable _mm_srai_epi32: arithmetic right shift of each 32-bit lane
 * by an immediate; counts of 32+ are clamped to 31 (sign fill). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_srai_epi32 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
/* MSVC requires a range of (0, 255). */
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
const int cnt = (imm8 & ~31) ? 31 : imm8;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(-cnt));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srai_epi32(a, imm8) _mm_srai_epi32((a), (imm8))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srai_epi32(a, imm8) simde_mm_srai_epi32(a, imm8)
#endif
/* Portable _mm_sra_epi16: arithmetic right shift of each 16-bit lane
 * by the unsigned 64-bit count in count's low lane; counts above 15
 * clamp to 15 (sign fill), per the intrinsic's specification.
 *
 * Fix: the count was previously read through signed i64[0], so a count
 * with the high bit set (a huge unsigned value, which must sign-fill)
 * compared as negative, escaped the clamp, and produced an undefined
 * negative shift.  Compare u64[0] instead, matching the sibling
 * simde_mm_sra_epi32 in this file. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sra_epi16 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sra_epi16(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
const int cnt = count_.u64[0] > 15 ? 15 : HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_i16 = vshlq_s16(a_.neon_i16, vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sra_epi16(a, count) (simde_mm_sra_epi16(a, count))
#endif
/* Portable _mm_sra_epi32: arithmetic right shift of each 32-bit lane
 * by the unsigned 64-bit count in count's low lane; counts above 31
 * clamp to 31 (sign fill).  Native intrinsic skipped where GCC
 * miscompiles it (SIMDE_BUG_GCC_BAD_MM_SRA_EPI32). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sra_epi32 (simde__m128i a, simde__m128i count) {
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_MM_SRA_EPI32)
return _mm_sra_epi32(a, count);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
count_ = simde__m128i_to_private(count);
const int cnt = count_.u64[0] > 31 ? 31 : HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; shift left by a negative count. */
r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] >> cnt;
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sra_epi32(a, count) (simde_mm_sra_epi32(a, (count)))
#endif
/* Portable _mm_slli_epi16: shift each 16-bit lane left by an immediate;
 * counts above 15 return zero.  Shadowed by the per-target macros below
 * where imm8 must (or can) stay a compile-time constant. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_slli_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
if (HEDLEY_UNLIKELY((imm8 > 15))) {
return simde_mm_setzero_si128();
}
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i16 = a_.i16 << SIMDE_CAST_VECTOR_SHIFT_COUNT(8, imm8 & 0xff);
#else
const int s = (imm8 > HEDLEY_STATIC_CAST(int, sizeof(r_.i16[0]) * CHAR_BIT) - 1) ? 0 : imm8;
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i16[i] << s);
}
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi16(a, imm8) _mm_slli_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_i16( \
((imm8) > 15) ? \
vandq_s16(simde__m128i_to_neon_i16(a), vdupq_n_s16(0)) : \
vshlq_n_s16(simde__m128i_to_neon_i16(a), ((imm8) & 15))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
((imm8 < 16) ? wasm_i16x8_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i16x8_const(0,0,0,0,0,0,0,0))
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
((imm8 & ~15) ? simde_mm_setzero_si128() : simde__m128i_from_altivec_i16(vec_sl(simde__m128i_to_altivec_i16(a), vec_splat_u16(HEDLEY_STATIC_CAST(unsigned short, imm8)))))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi16(a, imm8) simde_mm_slli_epi16(a, imm8)
#endif
/* Portable _mm_slli_epi32: shift each 32-bit lane left by an immediate;
 * counts above 31 return zero.  Shadowed by the per-target macros below
 * where imm8 must (or can) stay a compile-time constant. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_slli_epi32 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
if (HEDLEY_UNLIKELY((imm8 > 31))) {
return simde_mm_setzero_si128();
}
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i32 = a_.i32 << imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] << (imm8 & 0xff);
}
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi32(a, imm8) _mm_slli_epi32(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_i32( \
((imm8) > 31) ? \
vandq_s32(simde__m128i_to_neon_i32(a), vdupq_n_s32(0)) : \
vshlq_n_s32(simde__m128i_to_neon_i32(a), ((imm8) & 31))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
((imm8 < 32) ? wasm_i32x4_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i32x4_const(0,0,0,0))
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
(__extension__ ({ \
simde__m128i ret; \
if ((imm8) <= 0) { \
ret = a; \
} else if ((imm8) > 31) { \
ret = simde_mm_setzero_si128(); \
} else { \
ret = simde__m128i_from_altivec_i32( \
vec_sl(simde__m128i_to_altivec_i32(a), \
vec_splats(HEDLEY_STATIC_CAST(unsigned int, (imm8) & 31)))); \
} \
ret; \
}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi32(a, imm8) simde_mm_slli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_slli_epi64: shift each packed signed 64-bit lane of `a` left by
 * `imm8` bits; counts greater than 63 zero the vector (SSE2 semantics). */
simde_mm_slli_epi64 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
if (HEDLEY_UNLIKELY((imm8 > 63))) {
return simde_mm_setzero_si128();
}
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.i64 = a_.i64 << imm8;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.i64[i] = a_.i64[i] << (imm8 & 0xff);
}
#endif
return simde__m128i_from_private(r_);
}
/* Architecture-specific overrides; NEON vshlq_n_s64 requires a 1..63 count,
 * hence the explicit 0 and >63 special cases. */
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi64(a, imm8) _mm_slli_epi64(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi64(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_i64( \
((imm8) > 63) ? \
vandq_s64(simde__m128i_to_neon_i64(a), vdupq_n_s64(0)) : \
vshlq_n_s64(simde__m128i_to_neon_i64(a), ((imm8) & 63))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi64(a, imm8) \
((imm8 < 64) ? wasm_i64x2_shl(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i64x2_const(0,0))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi64(a, imm8) simde_mm_slli_epi64(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_srli_epi16: logical (zero-filling) right shift of each packed
 * 16-bit lane of `a` by `imm8`; counts greater than 15 zero the vector. */
simde_mm_srli_epi16 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
if (HEDLEY_UNLIKELY((imm8 > 15))) {
return simde_mm_setzero_si128();
}
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u16 = a_.u16 >> SIMDE_CAST_VECTOR_SHIFT_COUNT(8, imm8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.u16[i] = a_.u16[i] >> (imm8 & 0xff);
}
#endif
return simde__m128i_from_private(r_);
}
/* Overrides: NEON vshrq_n_u16 requires a 1..16 count, so a count of 0 is
 * bumped to 1 via `| ((imm8 & 15) == 0)` -- that branch is only reached
 * when imm8 > 0, so the adjustment never changes the result. */
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi16(a, imm8) _mm_srli_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_u16( \
((imm8) > 15) ? \
vandq_u16(simde__m128i_to_neon_u16(a), vdupq_n_u16(0)) : \
vshrq_n_u16(simde__m128i_to_neon_u16(a), ((imm8) & 15) | (((imm8) & 15) == 0))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
((imm8 < 16) ? wasm_u16x8_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i16x8_const(0,0,0,0,0,0,0,0))
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
((imm8 & ~15) ? simde_mm_setzero_si128() : simde__m128i_from_altivec_i16(vec_sr(simde__m128i_to_altivec_i16(a), vec_splat_u16(HEDLEY_STATIC_CAST(unsigned short, imm8)))))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi16(a, imm8) simde_mm_srli_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_srli_epi32: logical (zero-filling) right shift of each packed
 * 32-bit lane of `a` by `imm8`; counts greater than 31 zero the vector. */
simde_mm_srli_epi32 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
if (HEDLEY_UNLIKELY((imm8 > 31))) {
return simde_mm_setzero_si128();
}
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.u32 = a_.u32 >> SIMDE_CAST_VECTOR_SHIFT_COUNT(8, imm8 & 0xff);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.u32[i] = a_.u32[i] >> (imm8 & 0xff);
}
#endif
return simde__m128i_from_private(r_);
}
/* Overrides: NEON vshrq_n_u32 requires a 1..32 count, so a count of 0 is
 * bumped to 1 via `| ((imm8 & 31) == 0)`; that branch is only reached when
 * imm8 > 0 so the adjustment is behavior-neutral. */
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi32(a, imm8) _mm_srli_epi32(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_u32( \
((imm8) > 31) ? \
vandq_u32(simde__m128i_to_neon_u32(a), vdupq_n_u32(0)) : \
vshrq_n_u32(simde__m128i_to_neon_u32(a), ((imm8) & 31) | (((imm8) & 31) == 0))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
((imm8 < 32) ? wasm_u32x4_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i32x4_const(0,0,0,0))
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
(__extension__ ({ \
simde__m128i ret; \
if ((imm8) <= 0) { \
ret = a; \
} else if ((imm8) > 31) { \
ret = simde_mm_setzero_si128(); \
} else { \
ret = simde__m128i_from_altivec_i32( \
vec_sr(simde__m128i_to_altivec_i32(a), \
vec_splats(HEDLEY_STATIC_CAST(unsigned int, (imm8) & 31)))); \
} \
ret; \
}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi32(a, imm8) simde_mm_srli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_srli_epi64: logical (zero-filling) right shift of each packed
 * 64-bit lane of `a` by `imm8`; counts greater than 63 zero the vector. */
simde_mm_srli_epi64 (simde__m128i a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
/* (imm8 & 63) != imm8 is true exactly when imm8 is out of the 0..63 range. */
if (HEDLEY_UNLIKELY((imm8 & 63) != imm8))
return simde_mm_setzero_si128();
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no variable right shift; a left shift by a negative count is the
 * idiomatic equivalent. */
r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(-imm8))
#else
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_94488)
r_.u64 = a_.u64 >> SIMDE_CAST_VECTOR_SHIFT_COUNT(8, imm8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
r_.u64[i] = a_.u64[i] >> imm8;
}
#endif
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi64(a, imm8) _mm_srli_epi64(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi64(a, imm8) \
(((imm8) <= 0) ? \
(a) : \
simde__m128i_from_neon_u64( \
((imm8) > 63) ? \
vandq_u64(simde__m128i_to_neon_u64(a), vdupq_n_u64(0)) : \
vshrq_n_u64(simde__m128i_to_neon_u64(a), ((imm8) & 63) | (((imm8) & 63) == 0))))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi64(a, imm8) \
((imm8 < 64) ? wasm_u64x2_shr(simde__m128i_to_private(a).wasm_v128, imm8) : wasm_i64x2_const(0,0))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi64(a, imm8) simde_mm_srli_epi64(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_store_pd: store both doubles of `a` to a 16-byte-aligned address. */
simde_mm_store_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_store_pd(mem_addr, a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst1q_f64(mem_addr, simde__m128d_to_private(a).neon_f64);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 NEON has no f64 vectors; store the raw 128 bits as two i64 lanes. */
vst1q_s64(HEDLEY_REINTERPRET_CAST(int64_t*, mem_addr), simde__m128d_to_private(a).neon_i64);
#else
/* memcpy with an alignment assumption lets the compiler emit an aligned store. */
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d), &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_pd(mem_addr, a) simde_mm_store_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_store1_pd: broadcast the low double of `a` into both elements of
 * the (aligned) destination. */
simde_mm_store1_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_store1_pd(mem_addr, a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst1q_f64(mem_addr, vdupq_laneq_f64(a_.neon_f64, 0));
#else
mem_addr[0] = a_.f64[0];
mem_addr[1] = a_.f64[0];
#endif
#endif
}
/* _mm_store_pd1 is just an alternate spelling of _mm_store1_pd. */
#define simde_mm_store_pd1(mem_addr, a) simde_mm_store1_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store1_pd(mem_addr, a) simde_mm_store1_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#define _mm_store_pd1(mem_addr, a) simde_mm_store_pd1(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_store_sd: store only the low double of `a`; memcpy is used so the
 * destination may be unaligned and aliasing rules are respected. */
simde_mm_store_sd (simde_float64* mem_addr, simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_store_sd(mem_addr, a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
const simde_float64 v = vgetq_lane_f64(a_.neon_f64, 0);
simde_memcpy(mem_addr, &v, sizeof(v));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* No f64 lanes on ARMv7; move the bits through an int64 instead. */
const int64_t v = vgetq_lane_s64(a_.neon_i64, 0);
simde_memcpy(HEDLEY_REINTERPRET_CAST(int64_t*, mem_addr), &v, sizeof(v));
#else
simde_float64 v = a_.f64[0];
simde_memcpy(mem_addr, &v, sizeof(simde_float64));
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_sd(mem_addr, a) simde_mm_store_sd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_store_si128: store all 128 bits of `a` to an aligned address. */
simde_mm_store_si128 (simde__m128i* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_store_si128(HEDLEY_STATIC_CAST(__m128i*, mem_addr), a);
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_s32(HEDLEY_REINTERPRET_CAST(int32_t*, mem_addr), a_.neon_i32);
#else
/* Aligned-store hint lets the compiler pick an aligned vector move. */
simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i), &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_si128(mem_addr, a) simde_mm_store_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeh_pd: store the HIGH double (lane 1) of `a` to mem_addr. */
simde_mm_storeh_pd (simde_float64* mem_addr, simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storeh_pd(mem_addr, a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
*mem_addr = vgetq_lane_f64(a_.neon_f64, 1);
#else
*mem_addr = a_.f64[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeh_pd(mem_addr, a) simde_mm_storeh_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storel_epi64: store the low 64 bits of `a` to mem_addr (the upper
 * 64 bits of the destination are left untouched). */
simde_mm_storel_epi64 (simde__m128i* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storel_epi64(HEDLEY_STATIC_CAST(__m128i*, mem_addr), a);
#else
simde__m128i_private a_ = simde__m128i_to_private(a);
int64_t tmp;
/* memcpy to prevent aliasing, tmp because we can't take the
* address of a vector element. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
tmp = vgetq_lane_s64(a_.neon_i64, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
/* Workaround: buggy GCC otherwise warns a_ is unused on this path. */
(void) a_;
#endif
tmp = vec_extract(a_.altivec_i64, 0);
#else
tmp = a_.i64[0];
#endif
simde_memcpy(mem_addr, &tmp, sizeof(tmp));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storel_epi64(mem_addr, a) simde_mm_storel_epi64(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storel_pd: store the LOW double (lane 0) of `a` to mem_addr. */
simde_mm_storel_pd (simde_float64* mem_addr, simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storel_pd(mem_addr, a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
/* Copy through a temporary: a vector element's address cannot be taken. */
simde_float64 tmp;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
tmp = vgetq_lane_f64(a_.neon_f64, 0);
#else
tmp = a_.f64[0];
#endif
simde_memcpy(mem_addr, &tmp, sizeof(tmp));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storel_pd(mem_addr, a) simde_mm_storel_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storer_pd: store the two doubles of `a` in reversed order
 * (high lane first) to an aligned destination. */
simde_mm_storer_pd (simde_float64 mem_addr[2], simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storer_pd(mem_addr, a);
#else
simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* vextq by 1 lane swaps the two 64-bit halves. */
vst1q_s64(HEDLEY_REINTERPRET_CAST(int64_t*, mem_addr), vextq_s64(a_.neon_i64, a_.neon_i64, 1))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 1, 0);
simde_mm_store_pd(mem_addr, simde__m128d_from_private(a_));
#else
mem_addr[0] = a_.f64[1];
mem_addr[1] = a_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storer_pd(mem_addr, a) simde_mm_storer_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeu_pd: store both doubles of `a` to an UNALIGNED address. */
simde_mm_storeu_pd (simde_float64* mem_addr, simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storeu_pd(mem_addr, a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
vst1q_f64(mem_addr, simde__m128d_to_private(a).neon_f64);
#else
/* Plain memcpy: no alignment assumption, unlike simde_mm_store_pd. */
simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_pd(mem_addr, a) simde_mm_storeu_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeu_si128: store all 128 bits of `a` to an UNALIGNED address. */
simde_mm_storeu_si128 (void* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_storeu_si128(HEDLEY_STATIC_CAST(__m128i*, mem_addr), a);
#else
simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si128(mem_addr, a) simde_mm_storeu_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeu_si16: store the low 16 bits of `a` to an unaligned address.
 * The native intrinsic only exists in newer compilers, hence the version
 * checks alongside the SSE2 test. */
simde_mm_storeu_si16 (void* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_GCC_VERSION_CHECK(11,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
_mm_storeu_si16(mem_addr, a);
#else
int16_t val = simde_x_mm_cvtsi128_si16(a);
simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si16(mem_addr, a) simde_mm_storeu_si16(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeu_si32: store the low 32 bits of `a` to an unaligned address.
 * The native intrinsic only exists in newer compilers, hence the version
 * checks alongside the SSE2 test. */
simde_mm_storeu_si32 (void* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_GCC_VERSION_CHECK(11,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
_mm_storeu_si32(mem_addr, a);
#else
int32_t val = simde_mm_cvtsi128_si32(a);
simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si32(mem_addr, a) simde_mm_storeu_si32(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_storeu_si64: store the low 64 bits of `a` to an unaligned address.
 * The native intrinsic only exists in newer compilers, hence the version
 * checks alongside the SSE2 test. */
simde_mm_storeu_si64 (void* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && ( \
SIMDE_DETECT_CLANG_VERSION_CHECK(8,0,0) || \
HEDLEY_GCC_VERSION_CHECK(11,0,0) || \
HEDLEY_INTEL_VERSION_CHECK(20,21,1))
_mm_storeu_si64(mem_addr, a);
#else
int64_t val = simde_mm_cvtsi128_si64(a);
simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si64(mem_addr, a) simde_mm_storeu_si64(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_stream_pd: non-temporal (cache-bypassing) store of two doubles.
 * The fallback is a plain store; the non-temporal hint is purely a
 * performance property, so this is behaviorally equivalent. */
simde_mm_stream_pd (simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)], simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_stream_pd(mem_addr, a);
#else
simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_pd(mem_addr, a) simde_mm_stream_pd(HEDLEY_REINTERPRET_CAST(double*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_stream_si128: non-temporal 128-bit integer store; falls back to a
 * regular copy when the streaming intrinsic is unavailable. */
simde_mm_stream_si128 (simde__m128i* mem_addr, simde__m128i a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
_mm_stream_si128(HEDLEY_STATIC_CAST(__m128i*, mem_addr), a);
#else
simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_si128(mem_addr, a) simde_mm_stream_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_stream_si32: non-temporal 32-bit store; plain assignment fallback. */
simde_mm_stream_si32 (int32_t* mem_addr, int32_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_stream_si32(mem_addr, a);
#else
*mem_addr = a;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_si32(mem_addr, a) simde_mm_stream_si32(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_stream_si64: non-temporal 64-bit store.  The native path is
 * x86-64 only (the instruction does not exist in 32-bit mode) and excludes
 * MSVC, whose intrinsic signature differs. */
simde_mm_stream_si64 (int64_t* mem_addr, int64_t a) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(HEDLEY_MSVC_VERSION)
_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST(long long int*, int64_t*, mem_addr), a);
#else
*mem_addr = a;
#endif
}
/* _mm_stream_si64x is the alternate (MSVC-style) spelling. */
#define simde_mm_stream_si64x(mem_addr, a) simde_mm_stream_si64(mem_addr, a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
#define _mm_stream_si64(mem_addr, a) simde_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST(int64_t*, __int64*, mem_addr), a)
#define _mm_stream_si64x(mem_addr, a) simde_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST(int64_t*, __int64*, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise (wrapping) subtraction of sixteen packed signed 8-bit integers. */
simde_mm_sub_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_epi8(a, b);
#else
simde__m128i_private res_;
simde__m128i_private lhs_ = simde__m128i_to_private(a);
simde__m128i_private rhs_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
res_.neon_i8 = vsubq_s8(lhs_.neon_i8, rhs_.neon_i8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
res_.i8 = lhs_.i8 - rhs_.i8;
#else
/* Portable scalar fallback, one lane at a time. */
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(res_.i8) / sizeof(res_.i8[0])) ; lane++) {
res_.i8[lane] = lhs_.i8[lane] - rhs_.i8[lane];
}
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi8(a, b) simde_mm_sub_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise (wrapping) subtraction of eight packed signed 16-bit integers. */
simde_mm_sub_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_epi16(a, b);
#else
simde__m128i_private res_;
simde__m128i_private lhs_ = simde__m128i_to_private(a);
simde__m128i_private rhs_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
res_.neon_i16 = vsubq_s16(lhs_.neon_i16, rhs_.neon_i16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
res_.i16 = lhs_.i16 - rhs_.i16;
#else
/* Portable scalar fallback, one lane at a time. */
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(res_.i16) / sizeof(res_.i16[0])) ; lane++) {
res_.i16[lane] = lhs_.i16[lane] - rhs_.i16[lane];
}
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi16(a, b) simde_mm_sub_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise (wrapping) subtraction of four packed signed 32-bit integers. */
simde_mm_sub_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_epi32(a, b);
#else
simde__m128i_private res_;
simde__m128i_private lhs_ = simde__m128i_to_private(a);
simde__m128i_private rhs_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
res_.neon_i32 = vsubq_s32(lhs_.neon_i32, rhs_.neon_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
res_.i32 = lhs_.i32 - rhs_.i32;
#else
/* Portable scalar fallback, one lane at a time. */
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(res_.i32) / sizeof(res_.i32[0])) ; lane++) {
res_.i32[lane] = lhs_.i32[lane] - rhs_.i32[lane];
}
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi32(a, b) simde_mm_sub_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* Lane-wise (wrapping) subtraction of two packed signed 64-bit integers. */
simde_mm_sub_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_epi64(a, b);
#else
simde__m128i_private res_;
simde__m128i_private lhs_ = simde__m128i_to_private(a);
simde__m128i_private rhs_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
res_.neon_i64 = vsubq_s64(lhs_.neon_i64, rhs_.neon_i64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
res_.i64 = lhs_.i64 - rhs_.i64;
#else
/* Portable scalar fallback, one lane at a time. */
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(res_.i64) / sizeof(res_.i64[0])) ; lane++) {
res_.i64[lane] = lhs_.i64[lane] - rhs_.i64[lane];
}
#endif
return simde__m128i_from_private(res_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi64(a, b) simde_mm_sub_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* SIMDe-internal helper (x_ prefix): lane-wise subtraction of four packed
 * unsigned 32-bit integers, wrapping modulo 2^32. */
simde_x_mm_sub_epu32 (simde__m128i a, simde__m128i b) {
simde__m128i_private res_;
simde__m128i_private lhs_ = simde__m128i_to_private(a);
simde__m128i_private rhs_ = simde__m128i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
res_.u32 = lhs_.u32 - rhs_.u32;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
res_.neon_u32 = vsubq_u32(lhs_.neon_u32, rhs_.neon_u32);
#else
/* Portable scalar fallback, one lane at a time. */
SIMDE_VECTORIZE
for (size_t lane = 0 ; lane < (sizeof(res_.u32) / sizeof(res_.u32[0])) ; lane++) {
res_.u32[lane] = lhs_.u32[lane] - rhs_.u32[lane];
}
#endif
return simde__m128i_from_private(res_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* simde_mm_sub_pd: lane-wise subtraction of two packed doubles. */
simde_mm_sub_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f64 = a_.f64 - b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vsubq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_sub(a_.wasm_v128, b_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = a_.f64[i] - b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_pd(a, b) simde_mm_sub_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
/* simde_mm_sub_sd: subtract the low doubles of a and b; the high lane of the
 * result is copied unchanged from `a`. */
simde_mm_sub_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_sub_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0) && defined(SIMDE_FAST_EXCEPTIONS)
return simde_mm_move_sd(a, simde_mm_sub_pd(a, b));
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
/* Broadcast the low lanes first so a NaN/denormal in the (ignored) high
 * lanes cannot raise spurious floating-point exceptions. */
return simde_mm_move_sd(a, simde_mm_sub_pd(simde_x_mm_broadcastlow_pd(a), simde_x_mm_broadcastlow_pd(b)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
r_.f64[0] = a_.f64[0] - b_.f64[0];
r_.f64[1] = a_.f64[1];
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_sd(a, b) simde_mm_sub_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
/* simde_mm_sub_si64: subtract two 64-bit integers held in MMX registers
 * (requires both SSE2 and MMX natively). */
simde_mm_sub_si64 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sub_si64(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i64 = a_.i64 - b_.i64;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i64 = vsub_s64(a_.neon_i64, b_.neon_i64);
#else
r_.i64[0] = a_.i64[0] - b_.i64[0];
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_si64(a, b) simde_mm_sub_si64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_subs_epi8: lane-wise SATURATING subtraction of packed signed
 * 8-bit integers (results clamp to [-128, 127] instead of wrapping). */
simde_mm_subs_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_subs_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i8 = vqsubq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_sub_sat(a_.wasm_v128, b_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i8[0])) ; i++) {
r_.i8[i] = simde_math_subs_i8(a_.i8[i], b_.i8[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epi8(a, b) simde_mm_subs_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_subs_epi16: lane-wise SATURATING subtraction of packed signed
 * 16-bit integers (results clamp to [-32768, 32767]). */
simde_mm_subs_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_subs_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vqsubq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i16x8_sub_sat(a_.wasm_v128, b_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = simde_math_subs_i16(a_.i16[i], b_.i16[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epi16(a, b) simde_mm_subs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_subs_epu8: lane-wise SATURATING subtraction of packed unsigned
 * 8-bit integers (results clamp at 0 instead of wrapping). */
simde_mm_subs_epu8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_subs_epu8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vqsubq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u8x16_sub_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u8 = vec_subs(a_.altivec_u8, b_.altivec_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = simde_math_subs_u8(a_.u8[i], b_.u8[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epu8(a, b) simde_mm_subs_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
/* simde_mm_subs_epu16: lane-wise SATURATING subtraction of packed unsigned
 * 16-bit integers (results clamp at 0 instead of wrapping). */
simde_mm_subs_epu16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_subs_epu16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vqsubq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_u16x8_sub_sat(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_u16 = vec_subs(a_.altivec_u16, b_.altivec_u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = simde_math_subs_u16(a_.u16[i], b_.u16[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epu16(a, b) simde_mm_subs_epu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomieq_sd: unordered (quiet) compare of the low doubles for
 * equality.  "Unordered" means a NaN operand makes the comparison true for
 * eq and must not raise an invalid-operation exception -- the fenv dance in
 * the portable path discards any exception the C `==` might raise. */
simde_mm_ucomieq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomieq_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* x != x detects NaN; eq is true when either operand is NaN OR a == b. */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
uint64x2_t a_eq_b = vceqq_f64(a_.neon_f64, b_.neon_f64);
r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) == wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Save the FP environment, compare, then restore it so a signaling NaN
 * comparison leaves no exception flags behind (quiet semantics). */
fenv_t envp;
int x = feholdexcept(&envp);
r =  a_.f64[0] == b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r =  a_.f64[0] == b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomieq_sd(a, b) simde_mm_ucomieq_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomige_sd: unordered (quiet) compare of the low doubles for
 * greater-or-equal.  A NaN operand yields 0 (ge requires both ordered),
 * and no FP exception is raised. */
simde_mm_ucomige_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomige_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* x == x is false only for NaN; result requires both operands ordered. */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
uint64x2_t a_ge_b = vcgeq_f64(a_.neon_f64, b_.neon_f64);
r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >= wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Hold/restore the FP environment so the comparison raises nothing. */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f64[0] >= b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f64[0] >= b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomige_sd(a, b) simde_mm_ucomige_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomigt_sd: unordered (quiet) compare of the low doubles for
 * greater-than.  NaN operands yield 0; no FP exception is raised. */
simde_mm_ucomigt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomigt_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* x == x is false only for NaN; result requires both operands ordered. */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
uint64x2_t a_gt_b = vcgtq_f64(a_.neon_f64, b_.neon_f64);
r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) > wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Hold/restore the FP environment so the comparison raises nothing. */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f64[0] > b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f64[0] > b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomigt_sd(a, b) simde_mm_ucomigt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomile_sd: unordered (quiet) compare of the low doubles for
 * less-or-equal.  Per SSE2 ucomi semantics, a NaN operand makes le TRUE;
 * no FP exception is raised. */
simde_mm_ucomile_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomile_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Result is (either operand NaN) OR (a <= b). */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
uint64x2_t a_le_b = vcleq_f64(a_.neon_f64, b_.neon_f64);
r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <= wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Hold/restore the FP environment so the comparison raises nothing. */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f64[0] <= b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f64[0] <= b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomile_sd(a, b) simde_mm_ucomile_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomilt_sd: unordered (quiet) compare of the low doubles for
 * less-than.  Per SSE2 ucomi semantics, a NaN operand makes lt TRUE;
 * no FP exception is raised. */
simde_mm_ucomilt_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomilt_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Result is (either operand NaN) OR (a < b). */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
uint64x2_t a_lt_b = vcltq_f64(a_.neon_f64, b_.neon_f64);
r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) < wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Hold/restore the FP environment so the comparison raises nothing. */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f64[0] < b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f64[0] < b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomilt_sd(a, b) simde_mm_ucomilt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
/* simde_mm_ucomineq_sd: unordered (quiet) compare of the low doubles for
 * inequality.  A NaN operand yields 0 (neq requires both ordered AND
 * different); no FP exception is raised. */
simde_mm_ucomineq_sd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_ucomineq_sd(a, b);
#else
simde__m128d_private
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
/* Result is (both operands ordered) AND NOT(a == b). */
uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
uint64x2_t a_neq_b = vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(a_.neon_f64, b_.neon_f64))));
r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_f64x2_extract_lane(a_.wasm_v128, 0) != wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
/* Hold/restore the FP environment so the comparison raises nothing. */
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f64[0] != b_.f64[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f64[0] != b_.f64[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomineq_sd(a, b) simde_mm_ucomineq_sd(a, b)
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
/* simde_mm_lfence: load-fence memory barrier.  Without native SSE2 this
 * delegates to simde_mm_sfence(), whose portable implementation acts as a
 * full barrier (a conservative superset of lfence). */
simde_mm_lfence (void) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_lfence();
#else
simde_mm_sfence();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_lfence() simde_mm_lfence()
#endif
/* simde_mm_mfence: full memory fence. Without native SSE2 it falls back
 * to simde_mm_sfence(), same as the lfence emulation above. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_mfence (void) {
#if defined(SIMDE_X86_SSE2_NATIVE)
_mm_mfence();
#else
simde_mm_sfence();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mfence() simde_mm_mfence()
#endif
/* simde_mm_unpackhi_epi8 (PUNPCKHBW): interleave the upper eight 8-bit
 * lanes of a and b -> { a8,b8, a9,b9, ..., a15,b15 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpackhi_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpackhi_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i8 = vzip2q_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 has no vzip2q: zip the two high halves and recombine. */
int8x8_t a1 = vreinterpret_s8_s16(vget_high_s16(a_.neon_i16));
int8x8_t b1 = vreinterpret_s8_s16(vget_high_s16(b_.neon_i16));
int8x8x2_t result = vzip_s8(a1, b1);
r_.neon_i8 = vcombine_s8(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, b_.i8, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i8[0])) / 2) ; i++) {
r_.i8[(i * 2)] = a_.i8[i + ((sizeof(r_) / sizeof(r_.i8[0])) / 2)];
r_.i8[(i * 2) + 1] = b_.i8[i + ((sizeof(r_) / sizeof(r_.i8[0])) / 2)];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi8(a, b) simde_mm_unpackhi_epi8(a, b)
#endif
/* simde_mm_unpackhi_epi16 (PUNPCKHWD): interleave the upper four 16-bit
 * lanes of a and b -> { a4,b4, a5,b5, a6,b6, a7,b7 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpackhi_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpackhi_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i16 = vzip2q_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 fallback: zip the high halves, then recombine. */
int16x4_t a1 = vget_high_s16(a_.neon_i16);
int16x4_t b1 = vget_high_s16(b_.neon_i16);
int16x4x2_t result = vzip_s16(a1, b1);
r_.neon_i16 = vcombine_s16(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 4, 12, 5, 13, 6, 14, 7, 15);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i16[0])) / 2) ; i++) {
r_.i16[(i * 2)] = a_.i16[i + ((sizeof(r_) / sizeof(r_.i16[0])) / 2)];
r_.i16[(i * 2) + 1] = b_.i16[i + ((sizeof(r_) / sizeof(r_.i16[0])) / 2)];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi16(a, b) simde_mm_unpackhi_epi16(a, b)
#endif
/* simde_mm_unpackhi_epi32 (PUNPCKHDQ): interleave the upper two 32-bit
 * lanes of a and b -> { a2,b2, a3,b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpackhi_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpackhi_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i32 = vzip2q_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 fallback: zip the high halves, then recombine. */
int32x2_t a1 = vget_high_s32(a_.neon_i32);
int32x2_t b1 = vget_high_s32(b_.neon_i32);
int32x2x2_t result = vzip_s32(a1, b1);
r_.neon_i32 = vcombine_s32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 2, 6, 3, 7);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i32[0])) / 2) ; i++) {
r_.i32[(i * 2)] = a_.i32[i + ((sizeof(r_) / sizeof(r_.i32[0])) / 2)];
r_.i32[(i * 2) + 1] = b_.i32[i + ((sizeof(r_) / sizeof(r_.i32[0])) / 2)];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi32(a, b) simde_mm_unpackhi_epi32(a, b)
#endif
/* simde_mm_unpackhi_epi64 (PUNPCKHQDQ): r = { a1, b1 } -- the upper
 * 64-bit lane of each input. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpackhi_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpackhi_epi64(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x1_t a_h = vget_high_s64(a_.neon_i64);
int64x1_t b_h = vget_high_s64(b_.neon_i64);
r_.neon_i64 = vcombine_s64(a_h, b_h);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.i64, b_.i64, 1, 3);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i64[0])) / 2) ; i++) {
r_.i64[(i * 2)] = a_.i64[i + ((sizeof(r_) / sizeof(r_.i64[0])) / 2)];
r_.i64[(i * 2) + 1] = b_.i64[i + ((sizeof(r_) / sizeof(r_.i64[0])) / 2)];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi64(a, b) simde_mm_unpackhi_epi64(a, b)
#endif
/* simde_mm_unpackhi_pd (UNPCKHPD): r = { a1, b1 } -- the upper double
 * of each input. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_unpackhi_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpackhi_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vzip2q_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 1, 3);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 1, 3);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.f64[0])) / 2) ; i++) {
r_.f64[(i * 2)] = a_.f64[i + ((sizeof(r_) / sizeof(r_.f64[0])) / 2)];
r_.f64[(i * 2) + 1] = b_.f64[i + ((sizeof(r_) / sizeof(r_.f64[0])) / 2)];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_pd(a, b) simde_mm_unpackhi_pd(a, b)
#endif
/* simde_mm_unpacklo_epi8 (PUNPCKLBW): interleave the lower eight 8-bit
 * lanes of a and b -> { a0,b0, a1,b1, ..., a7,b7 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpacklo_epi8 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpacklo_epi8(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i8 = vzip1q_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 fallback: zip the low halves, then recombine. */
int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(a_.neon_i16));
int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(b_.neon_i16));
int8x8x2_t result = vzip_s8(a1, b1);
r_.neon_i8 = vcombine_s8(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, b_.i8, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i8[0])) / 2) ; i++) {
r_.i8[(i * 2)] = a_.i8[i];
r_.i8[(i * 2) + 1] = b_.i8[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi8(a, b) simde_mm_unpacklo_epi8(a, b)
#endif
/* simde_mm_unpacklo_epi16 (PUNPCKLWD): interleave the lower four 16-bit
 * lanes of a and b -> { a0,b0, a1,b1, a2,b2, a3,b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpacklo_epi16 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpacklo_epi16(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i16 = vzip1q_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 fallback: zip the low halves, then recombine. */
int16x4_t a1 = vget_low_s16(a_.neon_i16);
int16x4_t b1 = vget_low_s16(b_.neon_i16);
int16x4x2_t result = vzip_s16(a1, b1);
r_.neon_i16 = vcombine_s16(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 0, 8, 1, 9, 2, 10, 3, 11);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i16[0])) / 2) ; i++) {
r_.i16[(i * 2)] = a_.i16[i];
r_.i16[(i * 2) + 1] = b_.i16[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi16(a, b) simde_mm_unpacklo_epi16(a, b)
#endif
/* simde_mm_unpacklo_epi32 (PUNPCKLDQ): interleave the lower two 32-bit
 * lanes of a and b -> { a0,b0, a1,b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpacklo_epi32 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpacklo_epi32(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_i32 = vzip1q_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* ARMv7 fallback: zip the low halves, then recombine. */
int32x2_t a1 = vget_low_s32(a_.neon_i32);
int32x2_t b1 = vget_low_s32(b_.neon_i32);
int32x2x2_t result = vzip_s32(a1, b1);
r_.neon_i32 = vcombine_s32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 0, 4, 1, 5);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i32[0])) / 2) ; i++) {
r_.i32[(i * 2)] = a_.i32[i];
r_.i32[(i * 2) + 1] = b_.i32[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi32(a, b) simde_mm_unpacklo_epi32(a, b)
#endif
/* simde_mm_unpacklo_epi64 (PUNPCKLQDQ): r = { a0, b0 } -- the lower
 * 64-bit lane of each input. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_unpacklo_epi64 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpacklo_epi64(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
int64x1_t a_l = vget_low_s64(a_.neon_i64);
int64x1_t b_l = vget_low_s64(b_.neon_i64);
r_.neon_i64 = vcombine_s64(a_l, b_l);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.i64, b_.i64, 0, 2);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.i64[0])) / 2) ; i++) {
r_.i64[(i * 2)] = a_.i64[i];
r_.i64[(i * 2) + 1] = b_.i64[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi64(a, b) simde_mm_unpacklo_epi64(a, b)
#endif
/* simde_mm_unpacklo_pd (UNPCKLPD): r = { a0, b0 } -- the lower double
 * of each input. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_unpacklo_pd (simde__m128d a, simde__m128d b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_unpacklo_pd(a, b);
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a),
b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vzip1q_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 0, 2);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(r_) / sizeof(r_.f64[0])) / 2) ; i++) {
r_.f64[(i * 2)] = a_.f64[i];
r_.f64[(i * 2) + 1] = b_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_pd(a, b) simde_mm_unpacklo_pd(a, b)
#endif
/* simde_x_mm_negate_pd: SIMDe-internal helper that negates both f64
 * lanes. On native SSE2 this XORs with -0.0, flipping only the sign
 * bits (so NaN payloads and zeros are preserved bit-exactly). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_x_mm_negate_pd(simde__m128d a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return simde_mm_xor_pd(a, _mm_set1_pd(SIMDE_FLOAT64_C(-0.0)));
#else
simde__m128d_private
r_,
a_ = simde__m128d_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f64 = vec_neg(a_.altivec_f64);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f64 = vnegq_f64(a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f64x2_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f64 = -a_.f64;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
r_.f64[i] = -a_.f64[i];
}
#endif
return simde__m128d_from_private(r_);
#endif
}
/* simde_mm_xor_si128 (PXOR): bitwise XOR of two 128-bit registers.
 * Lane width is irrelevant, so the fallbacks operate on whatever
 * integer lane type the platform handles best (i32f). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_xor_si128 (simde__m128i a, simde__m128i b) {
#if defined(SIMDE_X86_SSE2_NATIVE)
return _mm_xor_si128(a, b);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a),
b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = a_.i32f[i] ^ b_.i32f[i];
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_xor_si128(a, b) simde_mm_xor_si128(a, b)
#endif
/* simde_x_mm_not_si128: SIMDe-internal bitwise NOT of a 128-bit
 * register. The AVX-512VL path uses a ternary-logic truth table of
 * 0x55, which is NOT of the first operand. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_x_mm_not_si128 (simde__m128i a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
return _mm_ternarylogic_epi32(a, a, a, 0x55);
#else
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = ~a_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
r_.i32f[i] = ~(a_.i32f[i]);
}
#endif
return simde__m128i_from_private(r_);
#endif
}
#define SIMDE_MM_SHUFFLE2(x, y) (((x) << 1) | (y))
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _MM_SHUFFLE2(x, y) SIMDE_MM_SHUFFLE2(x, y)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE2_H) */
/* :: End x86/sse2.h :: */
#ifndef IGNOREALL
/*
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2015 by Dave Coffin, dcoffin a cybercom o net
This is a command-line ANSI C program to convert raw photos from
any digital camera on any computer running any operating system.
No license is required to download and use dcraw.c. However,
to lawfully redistribute dcraw, you must either (a) offer, at
no extra charge, full source code* for all executable files
containing RESTRICTED functions, (b) distribute this code under
the GPL Version 2 or later, (c) remove all RESTRICTED functions,
re-implement them, or copy them from an earlier, unrestricted
Revision of dcraw.c, or (d) purchase a license from the author.
The functions that process Foveon images have been RESTRICTED
since Revision 1.237. All other code remains free for all uses.
*If you have not modified dcraw.c in any way, a link to my
homepage qualifies as "full source code".
$Revision: 1.44 $
$Date: 2015/03/08 19:19:51 $
make -f Makefile.devel
git commit -a -m "v.102"
git push
*/
/*@out DEFINES
#ifndef USE_JPEG
#define NO_JPEG
#endif
#ifndef USE_JASPER
#define NO_JASPER
#endif
@end DEFINES */
#define NO_LCMS
#define DCRAW_VERBOSE
//@out DEFINES
#define DCRAW_VERSION "9.24"
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _USE_MATH_DEFINES
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
//@end DEFINES
#if defined(DJGPP) || defined(__MINGW32__)
#define fseeko fseek
#define ftello ftell
#else
#define fgetc getc_unlocked
#endif
//@out DEFINES
#ifdef __CYGWIN__
#include <io.h>
#endif
#ifdef WIN32
#include <sys/utime.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")
#define snprintf _snprintf
#define strcasecmp stricmp
#define strncasecmp strnicmp
//@end DEFINES
typedef __int64 INT64;
typedef unsigned __int64 UINT64;
//@out DEFINES
#else
#include <unistd.h>
#include <utime.h>
#include <netinet/in.h>
typedef long long INT64;
typedef unsigned long long UINT64;
#endif
#ifdef NODEPS
#define NO_JASPER
#define NO_JPEG
#define NO_LCMS
#endif
#ifndef NO_JASPER
#include <jasper/jasper.h> /* Decode Red camera movies */
#endif
#ifndef NO_JPEG
#include <jpeglib.h> /* Decode compressed Kodak DC120 photos */
#endif /* and Adobe Lossy DNGs */
#ifndef NO_LCMS
#ifdef USE_LCMS
#include <lcms.h> /* Support color profiles */
#else
#include <lcms2.h> /* Support color profiles */
#endif
#endif
#ifdef LOCALEDIR
#include <libintl.h>
#define _(String) gettext(String)
#else
#define _(String) (String)
#endif
#ifdef LJPEG_DECODE
#error Please compile dcraw.c by itself.
#error Do not link it with ljpeg_decode.
#endif
#ifndef LONG_BIT
#define LONG_BIT (8 * sizeof (long))
#endif
//@end DEFINES
#if !defined(uchar)
#define uchar unsigned char
#endif
#if !defined(ushort)
#define ushort unsigned short
#endif
/*
All global variables are defined here, and all functions that
access them are prefixed with "CLASS". Note that a thread-safe
C++ class cannot have non-const static local variables.
*/
FILE *ifp, *ofp;
short order;
const char *ifname;
char *meta_data, xtrans[6][6], xtrans_abs[6][6];
char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64],software[64];
float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len;
time_t timestamp;
off_t strip_offset, data_offset;
off_t thumb_offset, meta_offset, profile_offset;
unsigned shot_order, kodak_cbpp, exif_cfa, unique_id;
unsigned thumb_length, meta_length, profile_length;
unsigned thumb_misc, *oprof, fuji_layout, shot_select=0, multi_out=0;
unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress;
unsigned black, maximum, mix_green, raw_color, zero_is_bad;
unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error;
unsigned tile_width, tile_length, gpsdata[32], load_flags;
unsigned flip, tiff_flip, filters, colors;
ushort raw_height, raw_width, height, width, top_margin, left_margin;
ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height;
ushort *raw_image, (*image)[4], cblack[4102];
ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4];
double pixel_aspect, aber[4]={1,1,1,1}, gamm[6]={ 0.45,4.5,0,0,0,0 };
float bright=1, user_mul[4]={0,0,0,0}, threshold=0;
int mask[8][4];
int half_size=0, four_color_rgb=0, document_mode=0, highlight=0;
int verbose=0, use_auto_wb=0, use_camera_wb=0, use_camera_matrix=1;
int output_color=1, output_bps=8, output_tiff=0, med_passes=0;
int no_auto_bright=0;
unsigned greybox[4] = { 0, 0, UINT_MAX, UINT_MAX };
float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4];
const double xyz_rgb[3][3] = { /* XYZ from RGB */
{ 0.412453, 0.357580, 0.180423 },
{ 0.212671, 0.715160, 0.072169 },
{ 0.019334, 0.119193, 0.950227 } };
const float d65_white[3] = { 0.950456, 1, 1.088754 };
int histogram[4][0x2000];
void (*write_thumb)(), (*write_fun)();
void (*load_raw)(), (*thumb_load_raw)();
jmp_buf failure;
struct decode {
struct decode *branch[2];
int leaf;
} first_decode[2048], *second_decode, *free_decode;
struct tiff_ifd {
int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes;
int t_tile_width, t_tile_length;
} tiff_ifd[10];
struct ph1 {
int format, key_off, tag_21a;
int t_black, split_col, black_col, split_row, black_row;
float tag_210;
} ph1;
#define CLASS
//@out DEFINES
#define FORC(cnt) for (c=0; c < cnt; c++)
#define FORC3 FORC(3)
#define FORC4 FORC(4)
#define FORCC FORC(colors)
#define SQR(x) ((x)*(x))
#define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define LIM(x,min,max) MAX(min,MIN(x,max))
#define ULIM(x,y,z) ((y) < (z) ? LIM(x,y,z) : LIM(x,z,y))
#define CLIP(x) LIM(x,0,65535)
#define SWAP(a,b) { a=a+b; b=a-b; a=a-b; }
#define my_swap(type, i, j) {type t = i; i = j; j = t;}
/*
In order to inline this calculation, I make the risky
assumption that all filter patterns can be described
by a repeating pattern of eight rows and two columns
Do not use the FC or BAYER macros with the Leaf CatchLight,
because its pattern is 16x16, not 2x8.
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2
PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1
0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M
1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C
2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y
3 C Y C Y C Y 3 G M G M G M 3 G M G M G M
4 C Y C Y C Y 4 Y C Y C Y C
PowerShot A5 5 G M G M G M 5 G M G M G M
0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y
7 M G M G M G 7 M G M G M G
0 1 2 3 4 5
0 C Y C Y C Y
1 G M G M G M
2 C Y C Y C Y
3 M G M G M G
All RGB cameras use one of these Bayer grids:
0x16161616: 0x61616161: 0x49494949: 0x94949494:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G
1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B
2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G
3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B
*/
#define RAW(row,col) \
raw_image[(row)*raw_width+(col)]
//@end DEFINES
#define FC(row,col) \
(filters >> ((((row) << 1 & 14) + ((col) & 1)) << 1) & 3)
//@out DEFINES
#define BAYER(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][FC(row,col)]
#define BAYER2(row,col) \
image[((row) >> shrink)*iwidth + ((col) >> shrink)][fcol(row,col)]
//@end DEFINES
/* @out COMMON
#include <math.h>
#define CLASS LibRaw::
#include "libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw.h"
#include "libraw_defines.h"
#include "var_defines.h"
@end COMMON */
//@out COMMON
/* Return the color index (0-3) of the sensor filter at (row,col).
 * filters == 1 selects the 16x16 Leaf CatchLight pattern below,
 * filters == 9 selects the global 6x6 Fuji X-Trans pattern, and any
 * other value is decoded as a standard 2x8 Bayer pattern via FC(). */
int CLASS fcol (int row, int col)
{
static const char filter[16][16] =
{ { 2,1,1,3,2,3,2,0,3,2,3,0,1,2,1,0 },
{ 0,3,0,2,0,1,3,1,0,1,1,2,0,3,3,2 },
{ 2,3,3,2,3,1,1,3,3,1,2,1,2,0,0,3 },
{ 0,1,0,1,0,2,0,2,2,0,3,0,1,3,2,1 },
{ 3,1,1,2,0,1,0,2,1,3,1,3,0,1,3,0 },
{ 2,0,0,3,3,2,3,1,2,0,2,0,3,2,2,1 },
{ 2,3,3,1,2,1,2,1,2,1,1,2,3,0,0,1 },
{ 1,0,0,2,3,0,0,3,0,3,0,3,2,1,2,3 },
{ 2,3,3,1,1,2,1,0,3,2,3,0,2,3,1,3 },
{ 1,0,2,0,3,0,3,2,0,1,1,2,0,1,0,2 },
{ 0,1,1,3,3,2,2,1,1,3,3,0,2,1,3,2 },
{ 2,3,2,0,0,1,3,0,2,0,1,2,3,0,1,0 },
{ 1,3,1,2,3,2,3,2,0,2,0,1,1,0,3,0 },
{ 0,2,0,3,1,0,0,1,1,3,3,2,3,2,2,1 },
{ 2,1,3,2,3,1,2,1,0,3,0,2,0,2,0,2 },
{ 0,3,1,0,0,2,0,3,2,1,3,1,1,3,1,3 } };
/* Margins shift the pattern so it stays aligned with the full sensor. */
if (filters == 1) return filter[(row+top_margin)&15][(col+left_margin)&15];
if (filters == 9) return xtrans[(row+6) % 6][(col+6) % 6];
return FC(row,col);
}
#ifndef __GLIBC__
/* Fallback for GNU memmem(): find the first occurrence of the byte
 * sequence needle[0..needlelen) inside haystack[0..haystacklen).
 * Returns a pointer to the start of the match, or 0 if absent.
 * Fix: guard needlelen > haystacklen -- the original then computed
 * haystack + (haystacklen - needlelen) with an effectively negative
 * offset, forming an out-of-range pointer (undefined behavior) that
 * could make the loop scan past the buffer. */
char *my_memmem (char *haystack, size_t haystacklen,
char *needle, size_t needlelen)
{
char *c;
if (needlelen > haystacklen)
return 0;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp (c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
/* Fallback for strcasestr(): case-insensitive substring search.
 * Returns a pointer to the first match of needle in haystack, or 0.
 * Improvement: compute the needle length once instead of calling
 * strlen(needle) on every haystack position (the original was O(n*m)
 * in strlen calls alone). Behavior is unchanged, including the empty
 * needle matching at the first position. */
char *my_strcasestr (char *haystack, const char *needle)
{
char *c;
size_t nlen = strlen(needle);
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, nlen))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
//@end COMMON
/* Abort decoding via longjmp(failure) if an allocation returned NULL.
 * `where` names the allocation site for the diagnostic message. */
void CLASS merror (void *ptr, const char *where)
{
if (ptr) return;
fprintf (stderr,_("%s: Out of memory in %s\n"), ifname, where);
longjmp (failure, 1);
}
/* Report a data error (unexpected EOF or corrupt bytes) to stderr the
 * first time it happens; every call increments the global data_error
 * counter so later stages know the image is suspect. */
void CLASS derror()
{
if (!data_error) {
fprintf (stderr, "%s: ", ifname);
if (feof(ifp))
fprintf (stderr,_("Unexpected end of file\n"));
else
fprintf (stderr,_("Corrupt data near 0x%llx\n"), (INT64) ftello(ifp));
}
data_error++;
}
//@out COMMON
/* Decode a 16-bit value from buffer s, honoring the global TIFF byte
 * order marker stored in `order`. */
ushort CLASS sget2 (uchar *s)
{
if (order == 0x4949) /* "II" means little-endian */
return s[0] | s[1] << 8;
else /* "MM" means big-endian */
return s[0] << 8 | s[1];
}
// DNG was written by:
#define CameraDNG 1
#define AdobeDNG 2
#ifdef LIBRAW_LIBRARY_BUILD
/* Sanity-check six bytes of Sony CameraInfo data: returns 1 when every
 * byte is valid packed BCD (both nibbles in 0-9), 0 otherwise. */
static ushort saneSonyCameraInfo(uchar a, uchar b, uchar c, uchar d, uchar e, uchar f){
  uchar probe[6] = { a, b, c, d, e, f };
  int idx;
  for (idx = 0; idx < 6; idx++) {
    if ((probe[idx] >> 4) > 9)
      return 0;
    if ((probe[idx] & 0x0f) > 9)
      return 0;
  }
  return 1;
}
/* Decode one packed-BCD byte into its decimal value (0-99); returns 0
 * when either nibble is not a valid BCD digit. */
static ushort bcd2dec(uchar data){
  uchar tens = data >> 4;
  uchar ones = data & 0x0f;
  if (tens > 9 || ones > 9)
    return 0;
  return (ushort)(tens * 10 + ones);
}
static uchar SonySubstitution[257] = "\x00\x01\x32\xb1\x0a\x0e\x87\x28\x02\xcc\xca\xad\x1b\xdc\x08\xed\x64\x86\xf0\x4f\x8c\x6c\xb8\xcb\x69\xc4\x2c\x03\x97\xb6\x93\x7c\x14\xf3\xe2\x3e\x30\x8e\xd7\x60\x1c\xa1\xab\x37\xec\x75\xbe\x23\x15\x6a\x59\x3f\xd0\xb9\x96\xb5\x50\x27\x88\xe3\x81\x94\xe0\xc0\x04\x5c\xc6\xe8\x5f\x4b\x70\x38\x9f\x82\x80\x51\x2b\xc5\x45\x49\x9b\x21\x52\x53\x54\x85\x0b\x5d\x61\xda\x7b\x55\x26\x24\x07\x6e\x36\x5b\x47\xb7\xd9\x4a\xa2\xdf\xbf\x12\x25\xbc\x1e\x7f\x56\xea\x10\xe6\xcf\x67\x4d\x3c\x91\x83\xe1\x31\xb3\x6f\xf4\x05\x8a\x46\xc8\x18\x76\x68\xbd\xac\x92\x2a\x13\xe9\x0f\xa3\x7a\xdb\x3d\xd4\xe7\x3a\x1a\x57\xaf\x20\x42\xb2\x9e\xc3\x8b\xf2\xd5\xd3\xa4\x7e\x1f\x98\x9c\xee\x74\xa5\xa6\xa7\xd8\x5e\xb0\xb4\x34\xce\xa8\x79\x77\x5a\xc1\x89\xae\x9a\x11\x33\x9d\xf5\x39\x19\x65\x78\x16\x71\xd2\xa9\x44\x63\x40\x29\xba\xa0\x8f\xe4\xd6\x3b\x84\x0d\xc2\x4e\x58\xdd\x99\x22\x6b\xc9\xbb\x17\x06\xe5\x7d\x66\x43\x62\xf6\xcd\x35\x90\x2e\x41\x8d\x6d\xaa\x09\x73\x95\x0c\xf1\x1d\xde\x4c\x2f\x2d\xf7\xd1\x72\xeb\xef\x48\xc7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
ushort CLASS sget2Rev(uchar *s) // specific to some Canon Makernotes fields, where they have endian in reverse
{
if (order == 0x4d4d) /* file order is "MM" (big-endian), so the reversed field is little-endian */
return s[0] | s[1] << 8;
else /* file order is "II" (little-endian), so the reversed field is big-endian */
return s[0] << 8 | s[1];
}
#endif
/* Read a 16-bit value from ifp in the current byte order. The buffer
 * is pre-filled with 0xff so a short read yields all-ones, not junk. */
ushort CLASS get2()
{
uchar str[2] = { 0xff,0xff };
fread (str, 1, 2, ifp);
return sget2(str);
}
/* Decode a 32-bit value from buffer s in the current byte order. */
unsigned CLASS sget4 (uchar *s)
{
if (order == 0x4949)
return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
else
return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
/* Let callers pass any pointer type without an explicit cast. */
#define sget4(s) sget4((uchar *)s)
/* Read a 32-bit value from ifp in the current byte order; the 0xff
 * pre-fill makes short reads produce all-ones instead of garbage. */
unsigned CLASS get4()
{
uchar str[4] = { 0xff,0xff,0xff,0xff };
fread (str, 1, 4, ifp);
return sget4(str);
}
/* Read a TIFF integer: type 3 (SHORT) is two bytes, anything else is
 * read as a four-byte LONG. */
unsigned CLASS getint (int type)
{
return type == 3 ? get2() : get4();
}
/* Reinterpret the bit pattern of i as an IEEE single-precision float
 * (type punning through a union). */
float CLASS int_to_float (int i)
{
union { int i; float f; } u;
u.i = i;
return u.f;
}
/* Read one TIFF/EXIF value of the given field type and return it as a
 * double. Type codes follow TIFF 6.0: 3=SHORT, 4=LONG, 5=RATIONAL,
 * 8=SSHORT, 9=SLONG, 10=SRATIONAL, 11=FLOAT, 12=DOUBLE; any other type
 * is read as a single byte. A zero rational denominator is treated as
 * 1 to avoid dividing by zero. */
double CLASS getreal (int type)
{
union { char c[8]; double d; } u,v;
int i, rev;
switch (type) {
case 3: return (unsigned short) get2();
case 4: return (unsigned int) get4();
case 5:
u.d = (unsigned int) get4();
v.d = (unsigned int)get4();
return u.d / (v.d ? v.d : 1);
case 8: return (signed short) get2();
case 9: return (signed int) get4();
case 10:
u.d = (signed int) get4();
v.d = (signed int)get4();
return u.d / (v.d?v.d:1);
case 11: return int_to_float (get4());
case 12:
/* Byte-swap the 8-byte double when file and host endianness differ. */
rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
for (i=0; i < 8; i++)
u.c[i ^ rev] = fgetc(ifp);
return u.d;
default: return fgetc(ifp);
}
}
/* Read `count` 16-bit words from ifp into pixel[], then byte-swap them
 * in place when the file byte order differs from the host's. A short
 * read is reported through derror(). */
void CLASS read_shorts (ushort *pixel, int count)
{
if (fread (pixel, 2, count, ifp) < count) derror();
if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
swab ((char*)pixel, (char*)pixel, count*2);
}
/* Fill the global 16-bit `curve[]` LUT with a natural cubic spline
 * through the control points (x_[i], y_[i]), all given in 0..65535.
 * One calloc provides an arena holding the row-pointer table A, the
 * (2*len)^2 matrix storage, and the work vectors b (slopes), c (second
 * derivatives), d (interval widths), x and y (normalized points), all
 * carved out of the same block. On allocation failure the curve is
 * simply left unchanged. */
void CLASS cubic_spline (const int *x_, const int *y_, const int len)
{
float **A, *b, *c, *d, *x, *y;
int i, j;
A = (float **) calloc (((2*len + 4)*sizeof **A + sizeof *A), 2*len);
if (!A) return;
A[0] = (float *) (A + 2*len);
for (i = 1; i < 2*len; i++)
A[i] = A[0] + 2*len*i;
/* Carve b, c, d, x, y out of the tail of the arena. */
y = len + (x = i + (d = i + (c = i + (b = A[0] + i*i))));
for (i = 0; i < len; i++) {
x[i] = x_[i] / 65535.0;
y[i] = y_[i] / 65535.0;
}
for (i = len-1; i > 0; i--) {
b[i] = (y[i] - y[i-1]) / (x[i] - x[i-1]);
d[i-1] = x[i] - x[i-1];
}
/* Assemble the tridiagonal system for the second derivatives. */
for (i = 1; i < len-1; i++) {
A[i][i] = 2 * (d[i-1] + d[i]);
if (i > 1) {
A[i][i-1] = d[i-1];
A[i-1][i] = d[i-1];
}
A[i][len-1] = 6 * (b[i+1] - b[i]);
}
/* Forward elimination... */
for(i = 1; i < len-2; i++) {
float v = A[i+1][i] / A[i][i];
for(j = 1; j <= len-1; j++)
A[i+1][j] -= v * A[i][j];
}
/* ...and back substitution into c[]. */
for(i = len-2; i > 0; i--) {
float acc = 0;
for(j = i; j <= len-2; j++)
acc += A[i][j]*c[j];
c[i] = (A[i][len-1] - acc) / A[i][i];
}
/* Evaluate the spline at every 16-bit input and clamp to 0..65535. */
for (i = 0; i < 0x10000; i++) {
float x_out = (float)(i / 65535.0);
float y_out = 0;
for (j = 0; j < len-1; j++) {
if (x[j] <= x_out && x_out <= x[j+1]) {
float v = x_out - x[j];
y_out = y[j] +
((y[j+1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j+1] * d[j])/6) * v
+ (c[j] * 0.5) * v*v + ((c[j+1] - c[j]) / (6 * d[j])) * v*v*v;
}
}
curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 :
(ushort)(y_out * 65535.0 + 0.5));
}
free (A);
}
/* Set pre_mul[] white balance for the Canon 600 by interpolating
 * between the two rows of the fixed table that bracket `temp`
 * (column 0 of each row is the temperature key, columns 1-4 the
 * channel multipliers). */
void CLASS canon_600_fixed_wb (int temp)
{
static const short mul[4][5] = {
{ 667, 358,397,565,452 },
{ 731, 390,367,499,517 },
{ 1119, 396,348,448,537 },
{ 1399, 485,431,508,688 } };
int lo, hi, i;
float frac=0;
for (lo=4; --lo; )
if (*mul[lo] <= temp) break;
for (hi=0; hi < 3; hi++)
if (*mul[hi] >= temp) break;
if (lo != hi)
frac = (float) (temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
for (i=1; i < 5; i++)
pre_mul[i-1] = 1 / (frac * mul[hi][i] + (1-frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white
 * Classify a color-ratio pair for Canon 600 auto-WB. `mar` is the
 * allowed margin; ratio[] is clamped/adjusted in place so near-white
 * samples can still contribute to the average. */
int CLASS canon_600_color (int ratio[2], int mar)
{
int clipped=0, target, miss;
if (flash_used) {
if (ratio[1] < -104)
{ ratio[1] = -104; clipped = 1; }
if (ratio[1] > 12)
{ ratio[1] = 12; clipped = 1; }
} else {
if (ratio[1] < -264 || ratio[1] > 461) return 2;
if (ratio[1] < -50)
{ ratio[1] = -50; clipped = 1; }
if (ratio[1] > 307)
{ ratio[1] = 307; clipped = 1; }
}
target = flash_used || ratio[1] < 197
? -38 - (398 * ratio[1] >> 10)
: -123 + (48 * ratio[1] >> 10);
if (target - mar <= ratio[0] &&
target + 20 >= ratio[0] && !clipped) return 0;
miss = target - ratio[0];
if (abs(miss) >= mar*4) return 2;
if (miss < -20) miss = -20;
if (miss > mar) miss = mar;
ratio[0] = target - miss;
return 1;
}
/* Estimate white balance for the Canon 600 by scanning the image for
 * small patches whose channel ratios look white (per canon_600_color)
 * and averaging them into pre_mul[]. The margin `mar` tightens with
 * exposure (canon_ev) and with flash. */
void CLASS canon_600_auto_wb()
{
int mar, row, col, i, j, st, count[] = { 0,0 };
int test[8], total[2][8], ratio[2][2], stat[2];
memset (&total, 0, sizeof total);
i = canon_ev + 0.5;
if (i < 10) mar = 150;
else if (i > 12) mar = 20;
else mar = 280 - 20 * i;
if (flash_used) mar = 80;
for (row=14; row < height-14; row+=4)
for (col=10; col < width; col+=2) {
/* Gather a 2x4 block of sensor values, split by filter color. */
for (i=0; i < 8; i++)
test[(i & 4) + FC(row+(i >> 1),col+(i & 1))] =
BAYER(row+(i >> 1),col+(i & 1));
/* Reject blocks that are too dark, too bright, or not uniform. */
for (i=0; i < 8; i++)
if (test[i] < 150 || test[i] > 1500) goto next;
for (i=0; i < 4; i++)
if (abs(test[i] - test[i+4]) > 50) goto next;
for (i=0; i < 2; i++) {
for (j=0; j < 4; j+=2)
ratio[i][j >> 1] = ((test[i*4+j+1]-test[i*4+j]) << 10) / test[i*4+j];
stat[i] = canon_600_color (ratio[i], mar);
}
if ((st = stat[0] | stat[1]) > 1) goto next;
for (i=0; i < 2; i++)
if (stat[i])
for (j=0; j < 2; j++)
test[i*4+j*2+1] = test[i*4+j*2] * (0x400 + ratio[i][j]) >> 10;
for (i=0; i < 8; i++)
total[st][i] += test[i];
count[st]++;
next: ;
}
/* Prefer exact-white samples unless near-white outnumbers them 200:1. */
if (count[0] | count[1]) {
st = count[0]*200 < count[1];
for (i=0; i < 4; i++)
pre_mul[i] = 1.0 / (total[st][i] + total[st][i+4]);
}
}
/* Pick one of six Canon 600 color matrices based on the magenta/cyan
 * (mc) and yellow/cyan (yc) white-balance ratios, or matrix 5 when the
 * flash fired, and load it into rgb_cam[] (scaled by 1/1024). Also
 * clears raw_color so the matrix is actually applied. */
void CLASS canon_600_coeff()
{
static const short table[6][12] = {
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -1203,1715,-1136,1648, 1388,-876,267,245, -1641,2153,3921,-3409 },
{ -615,1127,-1563,2075, 1437,-925,509,3, -756,1268,2519,-2007 },
{ -190,702,-1886,2398, 2153,-1641,763,-251, -452,964,3040,-2528 },
{ -190,702,-1878,2390, 1861,-1349,905,-393, -432,944,2617,-2105 },
{ -807,1319,-1785,2297, 1388,-876,769,-257, -230,742,2067,-1555 } };
int t=0, i, c;
float mc, yc;
mc = pre_mul[1] / pre_mul[2];
yc = pre_mul[3] / pre_mul[2];
if (mc > 1 && mc <= 1.28 && yc < 0.8789) t=1;
if (mc > 1.28 && mc <= 2) {
if (yc < 0.8789) t=3;
else if (yc <= 2) t=4;
}
if (flash_used) t=5;
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[t][i*4 + c] / 1024.0;
}
/* Unpack the Canon 600's packed 10-bit rows into raw_image: each
 * 10-byte group expands to eight pixels (8 high bytes plus two bytes
 * of packed low 2-bit fragments). Output rows advance by two and wrap
 * back to row 1, i.e. the file stores rows interleaved. */
void CLASS canon_600_load_raw()
{
uchar data[1120], *dp;
ushort *pix;
int irow, row;
for (irow=row=0; irow < height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
if (fread (data, 1, 1120, ifp) < 1120) derror();
pix = raw_image + row*raw_width;
for (dp=data; dp < data+1120; dp+=10, pix+=8) {
pix[0] = (dp[0] << 2) + (dp[1] >> 6 );
pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
pix[3] = (dp[4] << 2) + (dp[1] & 3);
pix[4] = (dp[5] << 2) + (dp[9] & 3);
pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
pix[7] = (dp[8] << 2) + (dp[9] >> 6 );
}
if ((row+=2) > height) row = 1;
}
}
/* Canon 600 post-load correction: apply per-(row%4, col%2) gain after
 * black subtraction, then run the fixed + automatic white balance and
 * matrix selection. `maximum` is rescaled by the largest gain and
 * `black` folded to zero since it has been subtracted per pixel. */
void CLASS canon_600_correct()
{
int row, col, val;
static const short mul[4][2] =
{ { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } };
for (row=0; row < height; row++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (col=0; col < width; col++) {
if ((val = BAYER(row,col) - black) < 0) val = 0;
val = val * mul[row & 3][col & 1] >> 9;
BAYER(row,col) = val;
}
}
canon_600_fixed_wb(1311);
canon_600_auto_wb();
canon_600_coeff();
maximum = (0x3ff - black) * 1109 >> 9;
black = 0;
}
/* Heuristic model probe: returns 1 (Canon S2 IS) if any byte at offset
 * 3284 within the first 100 rows of 3340-byte records exceeds 15,
 * 0 otherwise. */
int CLASS canon_s2is()
{
unsigned row;
for (row=0; row < 100; row++) {
fseek (ifp, row*3340 + 3284, SEEK_SET);
if (getc(ifp) > 15) return 1;
}
return 0;
}
/* Bit-stream reader shared by the Huffman-based decoders.
   nbits < 0 resets the internal state; nbits > 25 is rejected.
   Without a table, returns (and consumes) the next 'nbits' bits.
   With table 'huff', peeks 'nbits' bits, looks up the code, consumes
   only that code's true length and returns the decoded leaf value. */
unsigned CLASS getbithuff (int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
  static unsigned bitbuf=0;
  static int vbits=0, reset=0;
#else
/* Thread-safe builds keep the state in thread-local storage. */
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
  unsigned c;
  if (nbits > 25) return 0;
  if (nbits < 0)
    return bitbuf = vbits = reset = 0;     // flush: restart the bit buffer
  if (nbits == 0 || vbits < 0) return 0;
  /* Refill the buffer a byte at a time.  When zero_after_ff is set
     (JPEG-style streams), 0xff must be followed by a stuffing byte;
     a non-zero follower ends the entropy-coded segment ('reset'). */
  while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF &&
    !(reset = zero_after_ff && c == 0xff && fgetc(ifp))) {
    bitbuf = (bitbuf << 8) + (uchar) c;
    vbits += 8;
  }
  c = bitbuf << (32-vbits) >> (32-nbits);  // peek the top 'nbits' bits
  if (huff) {
    vbits -= huff[c] >> 8;                 // high byte = code length consumed
    c = (uchar) huff[c];                   // low byte = decoded symbol
  } else
    vbits -= nbits;
  if (vbits < 0) derror();                 // ran past end of stream
  return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n,0)
#define gethuff(h) getbithuff(*h,h+1)
/*
   Construct a decode tree according to the specification in *source.
   The first 16 bytes specify how many codes should be 1-bit, 2-bit,
   3-bit, etc.  Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
/* Build a one-shot Huffman lookup table from the 16-count spec shown
   in the comment above.  Advances *source past the bytes it consumes.
   huff[0] stores the maximum code length 'max'; each of the following
   2^max entries packs (code length << 8) | leaf value, so indexing
   with 'max' peeked bits decodes any code in a single lookup.
   Caller owns the returned buffer (free()). */
ushort * CLASS make_decoder_ref (const uchar **source)
{
  int max, len, h, i, j;
  const uchar *count;
  ushort *huff;
  count = (*source += 16) - 17;              // count[1..16] = codes per bit-length
  for (max=16; max && !count[max]; max--);   // longest code length actually used
  huff = (ushort *) calloc (1 + (1 << max), sizeof *huff);
  merror (huff, "make_decoder()");
  huff[0] = max;
  for (h=len=1; len <= max; len++)
    for (i=0; i < count[len]; i++, ++*source)
      /* Replicate each leaf so every max-bit pattern that begins with
         its code maps to the same entry; guard against overflow from
         over-subscribed (corrupt) tables. */
      for (j=0; j < 1 << (max-len); j++)
	if (h <= 1 << max)
	  huff[h++] = len << 8 | **source;
  return huff;
}
/* Convenience wrapper around make_decoder_ref() for callers that do
   not care about the source pointer being advanced. */
ushort * CLASS make_decoder (const uchar *source)
{
  const uchar *cursor = source;
  return make_decoder_ref (&cursor);
}
/* Build the two Huffman decoders used by the Canon CRW format.
   'table' selects one of three fixed code sets (clamped to 0..2);
   huff[0] decodes the first (DC-like) coefficient of each block,
   huff[1] the remaining ones.  Caller frees both tables. */
void CLASS crw_init_tables (unsigned table, ushort *huff[2])
{
  /* Code-length counts + leaf values in make_decoder() format. */
  static const uchar first_tree[3][29] = {
    { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
      0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
    { 0,2,2,3,1,1,1,1,2,0,0,0,0,0,0,0,
      0x03,0x02,0x04,0x01,0x05,0x00,0x06,0x07,0x09,0x08,0x0a,0x0b,0xff },
    { 0,0,6,3,1,1,2,0,0,0,0,0,0,0,0,0,
      0x06,0x05,0x07,0x04,0x08,0x03,0x09,0x02,0x00,0x0a,0x01,0x0b,0xff },
  };
  static const uchar second_tree[3][180] = {
    { 0,2,2,2,1,4,2,1,2,5,1,1,0,0,0,139,
      0x03,0x04,0x02,0x05,0x01,0x06,0x07,0x08,
      0x12,0x13,0x11,0x14,0x09,0x15,0x22,0x00,0x21,0x16,0x0a,0xf0,
      0x23,0x17,0x24,0x31,0x32,0x18,0x19,0x33,0x25,0x41,0x34,0x42,
      0x35,0x51,0x36,0x37,0x38,0x29,0x79,0x26,0x1a,0x39,0x56,0x57,
      0x28,0x27,0x52,0x55,0x58,0x43,0x76,0x59,0x77,0x54,0x61,0xf9,
      0x71,0x78,0x75,0x96,0x97,0x49,0xb7,0x53,0xd7,0x74,0xb6,0x98,
      0x47,0x48,0x95,0x69,0x99,0x91,0xfa,0xb8,0x68,0xb5,0xb9,0xd6,
      0xf7,0xd8,0x67,0x46,0x45,0x94,0x89,0xf8,0x81,0xd5,0xf6,0xb4,
      0x88,0xb1,0x2a,0x44,0x72,0xd9,0x87,0x66,0xd4,0xf5,0x3a,0xa7,
      0x73,0xa9,0xa8,0x86,0x62,0xc7,0x65,0xc8,0xc9,0xa1,0xf4,0xd1,
      0xe9,0x5a,0x92,0x85,0xa6,0xe7,0x93,0xe8,0xc1,0xc6,0x7a,0x64,
      0xe1,0x4a,0x6a,0xe6,0xb3,0xf1,0xd3,0xa5,0x8a,0xb2,0x9a,0xba,
      0x84,0xa4,0x63,0xe5,0xc5,0xf3,0xd2,0xc4,0x82,0xaa,0xda,0xe4,
      0xf2,0xca,0x83,0xa3,0xa2,0xc3,0xea,0xc2,0xe2,0xe3,0xff,0xff },
    { 0,2,2,1,4,1,4,1,3,3,1,0,0,0,0,140,
      0x02,0x03,0x01,0x04,0x05,0x12,0x11,0x06,
      0x13,0x07,0x08,0x14,0x22,0x09,0x21,0x00,0x23,0x15,0x31,0x32,
      0x0a,0x16,0xf0,0x24,0x33,0x41,0x42,0x19,0x17,0x25,0x18,0x51,
      0x34,0x43,0x52,0x29,0x35,0x61,0x39,0x71,0x62,0x36,0x53,0x26,
      0x38,0x1a,0x37,0x81,0x27,0x91,0x79,0x55,0x45,0x28,0x72,0x59,
      0xa1,0xb1,0x44,0x69,0x54,0x58,0xd1,0xfa,0x57,0xe1,0xf1,0xb9,
      0x49,0x47,0x63,0x6a,0xf9,0x56,0x46,0xa8,0x2a,0x4a,0x78,0x99,
      0x3a,0x75,0x74,0x86,0x65,0xc1,0x76,0xb6,0x96,0xd6,0x89,0x85,
      0xc9,0xf5,0x95,0xb4,0xc7,0xf7,0x8a,0x97,0xb8,0x73,0xb7,0xd8,
      0xd9,0x87,0xa7,0x7a,0x48,0x82,0x84,0xea,0xf4,0xa6,0xc5,0x5a,
      0x94,0xa4,0xc6,0x92,0xc3,0x68,0xb5,0xc8,0xe4,0xe5,0xe6,0xe9,
      0xa2,0xa3,0xe3,0xc2,0x66,0x67,0x93,0xaa,0xd4,0xd5,0xe7,0xf8,
      0x88,0x9a,0xd7,0x77,0xc4,0x64,0xe2,0x98,0xa5,0xca,0xda,0xe8,
      0xf3,0xf6,0xa9,0xb2,0xb3,0xf2,0xd2,0x83,0xba,0xd3,0xff,0xff },
    { 0,0,6,2,1,3,3,2,5,1,2,2,8,10,0,117,
      0x04,0x05,0x03,0x06,0x02,0x07,0x01,0x08,
      0x09,0x12,0x13,0x14,0x11,0x15,0x0a,0x16,0x17,0xf0,0x00,0x22,
      0x21,0x18,0x23,0x19,0x24,0x32,0x31,0x25,0x33,0x38,0x37,0x34,
      0x35,0x36,0x39,0x79,0x57,0x58,0x59,0x28,0x56,0x78,0x27,0x41,
      0x29,0x77,0x26,0x42,0x76,0x99,0x1a,0x55,0x98,0x97,0xf9,0x48,
      0x54,0x96,0x89,0x47,0xb7,0x49,0xfa,0x75,0x68,0xb6,0x67,0x69,
      0xb9,0xb8,0xd8,0x52,0xd7,0x88,0xb5,0x74,0x51,0x46,0xd9,0xf8,
      0x3a,0xd6,0x87,0x45,0x7a,0x95,0xd5,0xf6,0x86,0xb4,0xa9,0x94,
      0x53,0x2a,0xa8,0x43,0xf5,0xf7,0xd4,0x66,0xa7,0x5a,0x44,0x8a,
      0xc9,0xe8,0xc8,0xe7,0x9a,0x6a,0x73,0x4a,0x61,0xc7,0xf4,0xc6,
      0x65,0xe9,0x72,0xe6,0x71,0x91,0x93,0xa6,0xda,0x92,0x85,0x62,
      0xf3,0xc5,0xb2,0xa4,0x84,0xba,0x64,0xa5,0xb3,0xd2,0x81,0xe5,
      0xd3,0xaa,0xc4,0xca,0xf2,0xb1,0xe4,0xd1,0x83,0x63,0xea,0xc3,
      0xe2,0x82,0xf1,0xa3,0xc2,0xa1,0xc1,0xe3,0xa2,0xe1,0xff,0xff }
  };
  if (table > 2) table = 2;
  huff[0] = make_decoder ( first_tree[table]);
  huff[1] = make_decoder (second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
/* Decide whether the CRW file begins with an uncompressed low-order-
   bits plane (return 1) or with compressed data (return 0).  In
   Canon-compressed streams every 0xff byte is followed by 0x00, so a
   0xff followed by anything else proves uncompressed low bits. */
int CLASS canon_has_lowbits()
{
  uchar test[0x4000];
  unsigned i;
  int ret = 1;
  fseek (ifp, 0, SEEK_SET);
  fread (test, 1, sizeof test, ifp);
  for (i = 540; i < sizeof test - 1; i++)
    if (test[i] == 0xff) {
      if (test[i+1]) return 1;   // 0xff not followed by 0x00
      ret = 0;                   // saw a proper stuffing pair
    }
  return ret;
}
/* Decode Canon CRW compressed raw data: Huffman-coded 8x8 blocks of
   differences (JPEG-like run/size leaves), with an optional plane of
   uncompressed low-order bits stored before the compressed stream. */
void CLASS canon_load_raw()
{
  ushort *pixel, *prow, *huff[2];
  int nblocks, lowbits, i, c, row, r, save, val;
  int block, diffbuf[64], leaf, len, diff, carry=0, pnum=0, base[2];
  crw_init_tables (tiff_compress, huff);
  lowbits = canon_has_lowbits();
  if (!lowbits) maximum = 0x3ff;
  /* Skip the 540-byte header plus the low-bit plane if present. */
  fseek (ifp, 540 + lowbits*raw_height*raw_width/4, SEEK_SET);
  zero_after_ff = 1;
  getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row+=8) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    pixel = raw_image + row*raw_width;
    nblocks = MIN (8, raw_height-row) * raw_width >> 6;  // 64-pixel blocks in this band
    for (block=0; block < nblocks; block++) {
      memset (diffbuf, 0, sizeof diffbuf);
      for (i=0; i < 64; i++ ) {
	/* First coefficient uses huff[0], the rest huff[1].
	   Leaf: high nibble = zero-run length, low nibble = bit count. */
	leaf = gethuff(huff[i > 0]);
	if (leaf == 0 && i) break;       // end-of-block
	if (leaf == 0xff) continue;
	i += leaf >> 4;
	len = leaf & 15;
	if (len == 0) continue;
	diff = getbits(len);
	/* Sign-extend: top bit clear means a negative difference. */
	if ((diff & (1 << (len-1))) == 0)
	  diff -= (1 << len) - 1;
	if (i < 64) diffbuf[i] = diff;
      }
      diffbuf[0] += carry;               // DC carries across blocks
      carry = diffbuf[0];
      for (i=0; i < 64; i++ ) {
	if (pnum++ % raw_width == 0)
	  base[0] = base[1] = 512;       // reset predictors at row start
	if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
	  derror();                      // value exceeded 10 bits
      }
    }
    /* Merge in the uncompressed low 2 bits, if the file has them. */
    if (lowbits) {
      save = ftell(ifp);
      fseek (ifp, 26 + row*raw_width/4, SEEK_SET);
      for (prow=pixel, i=0; i < raw_width*2; i++) {
	c = fgetc(ifp);
	for (r=0; r < 8; r+=2, prow++) {
	  val = (*prow << 2) + ((c >> r) & 3);
	  if (raw_width == 2672 && val < 512) val += 2;
	  *prow = val;
	}
      }
      fseek (ifp, save, SEEK_SET);
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    FORC(2) free (huff[c]);
    throw;
  }
#endif
  FORC(2) free (huff[c]);
}
//@end COMMON
/*
Not a full implementation of Lossless JPEG, just
enough to decode Canon, Kodak and Adobe DNG images.
*/
/* Decoder state for the minimal lossless-JPEG implementation below. */
struct jhead {
  int bits, high, wide, clrs, sraw, psv, restart, vpred[6];  // sample precision, frame dims, component count, sRAW flag, predictor selector, restart interval, per-component predictors
  ushort *huff[6], *free[4], *row;  // per-component Huffman tables, the tables actually owned (to free), two-row work buffer
};
//@out COMMON
/* Parse lossless-JPEG headers up to the start-of-scan marker and set
   up *jh for ljpeg_row().  Returns 1 on success, 0 on anything that
   does not look like a supported stream.  With info_only set, stops
   after the frame dimensions are known (no tables allocated). */
int CLASS ljpeg_start (struct jhead *jh, int info_only)
{
  int c, tag, len;
  uchar data[0x10000];
  const uchar *dp;
  memset (jh, 0, sizeof *jh);
  jh->restart = INT_MAX;
  fread (data, 2, 1, ifp);
  if (data[1] != 0xd8) return 0;        // must begin with SOI
  do {
    fread (data, 2, 2, ifp);            // marker + segment length
    tag =  data[0] << 8 | data[1];
    len = (data[2] << 8 | data[3]) - 2;
//    printf ("\n*** ljpeg_start pos= %llx tag= %x, len= %d", ftell(ifp)-4, tag, len);
    if (tag <= 0xff00) return 0;
    fread (data, 1, len, ifp);
    switch (tag) {
      case 0xffc3:  // start of frame; lossless, Huffman
	jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
//	printf ("\n*** %x: startraw= %d", tag, jh->sraw);
	/* fall through: SOF3 shares the dimension fields with SOF0 */
      case 0xffc0:  // start of frame; baseline jpeg
	jh->bits = data[0];
	jh->high = data[1] << 8 | data[2];
	jh->wide = data[3] << 8 | data[4];
	jh->clrs = data[5] + jh->sraw;
	if (!strcmp(model, "EOS 5DS"))  // this body stores dims swapped
	  {
	    jh->wide = data[1] << 8 | data[2];
	    jh->high = data[3] << 8 | data[4];
	  }
//	printf ("\n*** %x: bits= %d; high= %d; wide= %d; clrs= %d",
//		tag, jh->bits, jh->high, jh->wide, jh->clrs);
	if (len == 9 && !dng_version) getc(ifp);
	break;
      case 0xffc4:  // define Huffman tables
	if (info_only) break;
	for (dp = data; dp < data+len && (c = *dp++) < 4; )
	  jh->free[c] = jh->huff[c] = make_decoder_ref (&dp);
	break;
      case 0xffda:  // start of scan
	jh->psv = data[1+data[0]*2];          // predictor selection value
	jh->bits -= data[3+data[0]*2] & 15;   // point transform
	break;
      case 0xffdd:  // define restart interval
	jh->restart = data[0] << 8 | data[1];
    }
  } while (tag != 0xffda);
//  printf ("\n");
  if (info_only) return 1;
  if (jh->clrs > 6 || !jh->huff[0]) return 0;
  /* Missing tables inherit from the previous component; sRAW chroma
     components share specific tables. */
  FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c];
  if (jh->sraw) {
    FORC(4)        jh->huff[2+c] = jh->huff[1];
    FORC(jh->sraw) jh->huff[1+c] = jh->huff[0];
  }
  jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4);  // two interleaved rows
  merror (jh->row, "ljpeg_start()");
  return zero_after_ff = 1;
}
/* Release everything ljpeg_start() allocated.  Only the free[] slots
   are owned (huff[] entries may alias each other), plus the row buffer. */
void CLASS ljpeg_end (struct jhead *jh)
{
  int c;
  for (c = 0; c < 4; c++)
    if (jh->free[c])
      free (jh->free[c]);
  free (jh->row);
}
/* Decode one lossless-JPEG difference: a Huffman-coded bit length
   followed by that many raw bits, sign-extended per the JPEG rule.
   A NULL table means the stream is corrupt. */
int CLASS ljpeg_diff (ushort *huff)
{
  int len, diff;
  if(!huff)
#ifdef LIBRAW_LIBRARY_BUILD
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
    longjmp (failure, 2);
#endif
  len = gethuff(huff);
  /* Length 16 means -32768 (except in early DNG versions). */
  if (len == 16 && (!dng_version || dng_version >= 0x1010000))
    return -32768;
  diff = getbits(len);
  /* Sign-extend: a clear top bit marks a negative value. */
  if ((diff & (1 << (len-1))) == 0)
    diff -= (1 << len) - 1;
  return diff;
}
/* Decode one row of the lossless-JPEG scan into jh->row and return a
   pointer to it.  Uses the predictor selected by jh->psv, handles
   restart markers, and tracks the sRAW shared-luma predictor. */
ushort * CLASS ljpeg_row (int jrow, struct jhead *jh)
{
  int col, c, diff, pred, spred=0;
  ushort mark=0, *row[3];
  /* At each restart interval: reset the vertical predictors and skip
     forward to the next 0xffdN restart marker. */
  if (jrow * jh->wide % jh->restart == 0) {
    FORC(6) jh->vpred[c] = 1 << (jh->bits-1);
    if (jrow) {
      fseek (ifp, -2, SEEK_CUR);
      do mark = (mark << 8) + (c = fgetc(ifp));
      while (c != EOF && mark >> 4 != 0xffd);
    }
    getbits(-1);
  }
  /* row[0] = current row, row[1] = previous row (double buffered). */
  FORC3 row[c] = jh->row + jh->wide*jh->clrs*((jrow+c) & 1);
  for (col=0; col < jh->wide; col++)
    FORC(jh->clrs) {
      diff = ljpeg_diff (jh->huff[c]);
      if (jh->sraw && c <= jh->sraw && (col | c))
		    pred = spred;          // sRAW luma components share a predictor
      else if (col) pred = row[0][-jh->clrs];
      else	    pred = (jh->vpred[c] += diff) - diff;
      /* JPEG predictor selection values 1..7 (psv); 1 = left only. */
      if (jrow && col) switch (jh->psv) {
	case 1:	break;
	case 2: pred = row[1][0];					break;
	case 3: pred = row[1][-jh->clrs];				break;
	case 4: pred = pred +   row[1][0] - row[1][-jh->clrs];		break;
	case 5: pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1);	break;
	case 6: pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1);	break;
	case 7: pred = (pred + row[1][0]) >> 1;				break;
	default: pred = 0;
      }
      if ((**row = pred + diff) >> jh->bits) derror();  // overflow check
      if (c <= jh->sraw) spred = **row;
      row[0]++; row[1]++;
    }
  return row[2];
}
/* Load a raw image stored as a single lossless-JPEG stream (Canon CR2
   and similar), handling Canon's vertical slicing (cr2_slice[]) and a
   couple of model-specific layout quirks. */
void CLASS lossless_jpeg_load_raw()
{
  int jwide, jrow, jcol, val, jidx, i, j, row=0, col=0;
  struct jhead jh;
  ushort *rp;
//  printf ("\n*** lossless_jpeg_load_raw\n");
  if (!ljpeg_start (&jh, 0)) return;
  if(jh.wide<1 || jh.high<1 || jh.clrs<1 || jh.bits <1)
#ifdef LIBRAW_LIBRARY_BUILD
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
    longjmp (failure, 2);
#endif
  jwide = jh.wide * jh.clrs;      // samples per decoded JPEG row
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    rp = ljpeg_row (jrow, &jh);
    if (load_flags & 1)
      row = jrow & 1 ? height-1-jrow/2 : jrow/2;  // interlaced row order
    for (jcol=0; jcol < jwide; jcol++) {
      val = curve[*rp++];
      /* Map the linear JPEG sample index into the sliced CR2 layout:
	 slice i covers cr2_slice[1] columns (last slice may differ). */
      if (cr2_slice[0]) {
	jidx = jrow*jwide + jcol;
	i = jidx / (cr2_slice[1]*jh.high);
	if ((j = i >= cr2_slice[0]))
		 i  = cr2_slice[0];
	jidx -= i * (cr2_slice[1]*jh.high);
	row = jidx / cr2_slice[1+j];
	col = jidx % cr2_slice[1+j] + i*cr2_slice[1];
      }
      if (raw_width == 3984 && (col -= 2) < 0)
	col += (row--,raw_width);       // model-specific 2-column shift
      if(row>raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
	throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
	longjmp (failure, 3);
#endif
      if ((unsigned) row < raw_height) RAW(row,col) = val;
      if (++col >= raw_width)
	col = (row++,0);
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    ljpeg_end (&jh);
    throw;
  }
#endif
  ljpeg_end (&jh);
}
/* Load Canon sRAW/mRAW: a lossless-JPEG stream of subsampled YCbCr,
   decoded slice by slice, then chroma-interpolated and converted to
   RGB in place (unless LibRaw is asked to keep YCC via sraw_ycc). */
void CLASS canon_sraw_load_raw()
{
  struct jhead jh;
  short *rp=0, (*ip)[4];
  int jwide, slice, scol, ecol, row, col, jrow=0, jcol=0, pix[3], c;
  int v[3]={0,0,0}, ver, hue;
  char *cp;
  if (!ljpeg_start (&jh, 0) || jh.clrs < 4) return;
  jwide = (jh.wide >>= 1) * jh.clrs;  // luma is stored at half width
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  /* Pass 1: distribute decoded samples into the image buffer,
     walking Canon's slice layout left to right. */
  for (ecol=slice=0; slice <= cr2_slice[0]; slice++) {
    scol = ecol;
    ecol += cr2_slice[1] * 2 / jh.clrs;
    if (!cr2_slice[0] || ecol > raw_width-1) ecol = raw_width & -2;
    for (row=0; row < height; row += (jh.clrs >> 1) - 1) {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      ip = (short (*)[4]) image + row*width;
      for (col=scol; col < ecol; col+=2, jcol+=jh.clrs) {
	if ((jcol %= jwide) == 0)
	  rp = (short *) ljpeg_row (jrow++, &jh);
	if (col >= width) continue;
#ifdef LIBRAW_LIBRARY_BUILD
	if(imgdata.params.sraw_ycc>=2)
	  {
	    /* Keep raw YCC; neutralize chroma of luma-only pixels. */
	    FORC (jh.clrs-2)
	      {
		ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
		ip[col + (c >> 1)*width + (c & 1)][1] = ip[col + (c >> 1)*width + (c & 1)][2] = 8192;
	      }
	    ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
	    ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
	  }
	else if(imgdata.params.sraw_ycc)
	  {
	    FORC (jh.clrs-2)
	      ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
	    ip[col][1] = rp[jcol+jh.clrs-2] - 8192;
	    ip[col][2] = rp[jcol+jh.clrs-1] - 8192;
	  }
	else
#endif
	  {
	    /* Default path: luma samples plus chroma biased by 16384. */
	    FORC (jh.clrs-2)
	      ip[col + (c >> 1)*width + (c & 1)][0] = rp[jcol+c];
	    ip[col][1] = rp[jcol+jh.clrs-2] - 16384;
	    ip[col][2] = rp[jcol+jh.clrs-1] - 16384;
	  }
      }
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    ljpeg_end (&jh);
    throw ;
  }
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  if(imgdata.params.sraw_ycc>=2)
    {
      ljpeg_end (&jh);
      maximum = 0x3fff;
      return;
    }
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  /* Choose the hue-rotation constant from the firmware version. */
  for (cp=model2; *cp && !isdigit(*cp); cp++);
  sscanf (cp, "%d.%d.%d", v, v+1, v+2);
  ver = (v[0]*1000 + v[1])*1000 + v[2];
  hue = (jh.sraw+1) << 2;
  if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
    hue = jh.sraw << 1;
  ip = (short (*)[4]) image;
  rp = ip[0];
  /* Pass 2: fill in the subsampled chroma by averaging neighbors,
     vertically (odd rows) then horizontally (odd columns). */
  for (row=0; row < height; row++, ip+=width) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (row & (jh.sraw >> 1))
      for (col=0; col < width; col+=2)
	for (c=1; c < 3; c++)
	  if (row == height-1)
	       ip[col][c] =  ip[col-width][c];
	  else ip[col][c] = (ip[col-width][c] + ip[col+width][c] + 1) >> 1;
    for (col=1; col < width; col+=2)
      for (c=1; c < 3; c++)
	if (col == width-1)
	     ip[col][c] =  ip[col-1][c];
	else ip[col][c] = (ip[col-1][c] + ip[col+1][c] + 1) >> 1;
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if(!imgdata.params.sraw_ycc)
#endif
  /* Pass 3: YCC -> RGB, with model-dependent coefficients. */
  for ( ; rp < ip[0]; rp+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (unique_id == 0x80000218 ||
	unique_id == 0x80000250 ||
	unique_id == 0x80000261 ||
	unique_id == 0x80000281 ||
	unique_id == 0x80000287) {
      rp[1] = (rp[1] << 2) + hue;
      rp[2] = (rp[2] << 2) + hue;
      pix[0] = rp[0] + ((   50*rp[1] + 22929*rp[2]) >> 14);
      pix[1] = rp[0] + ((-5640*rp[1] - 11751*rp[2]) >> 14);
      pix[2] = rp[0] + ((29040*rp[1] -   101*rp[2]) >> 14);
    } else {
      if (unique_id < 0x80000218) rp[0] -= 512;
      pix[0] = rp[0] + rp[2];
      pix[2] = rp[0] + rp[1];
      pix[1] = rp[0] + ((-778*rp[1] - (rp[2] << 11)) >> 12);
    }
    FORC3 rp[c] = CLIP(pix[c] * sraw_mul[c] >> 10);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    ljpeg_end (&jh);
    throw ;
  }
#endif
  ljpeg_end (&jh);
  maximum = 0x3fff;
}
/* Copy one decoded DNG sample group from the stream at *rp into the
   output (raw_image when decoding a mosaic, image[] otherwise),
   advancing *rp past the consumed samples.  For dual-image files
   (is_raw == 2) with shot_select set, the wanted sample sits one
   slot further into the stream. */
void CLASS adobe_copy_pixel (unsigned row, unsigned col, ushort **rp)
{
  int c;
  const int select_second = (is_raw == 2 && shot_select);
  if (select_second) (*rp)++;
  if (raw_image) {
    if (row < raw_height && col < raw_width)
      RAW(row,col) = curve[**rp];
    *rp += is_raw;
  } else {
    if (row < height && col < width)
      for (c = 0; c < tiff_samples; c++)
        image[row*width+col][c] = curve[(*rp)[c]];
    *rp += tiff_samples;
  }
  if (select_second) (*rp)--;
}
/* Load a lossless-JPEG-compressed DNG: one JPEG stream per tile,
   walking tiles left-to-right, top-to-bottom across the sensor. */
void CLASS lossless_dng_load_raw()
{
  unsigned save, trow=0, tcol=0, jwide, jrow, jcol, row, col;
  struct jhead jh;
  ushort *rp;
  while (trow < raw_height) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    save = ftell(ifp);
    if (tile_length < INT_MAX)
      fseek (ifp, get4(), SEEK_SET);   // tiled: seek to this tile's offset
    if (!ljpeg_start (&jh, 0)) break;
    jwide = jh.wide;
    if (filters) jwide *= jh.clrs;     // mosaic: components widen the row
    jwide /= is_raw;
#ifdef LIBRAW_LIBRARY_BUILD
    try {
#endif
    for (row=col=jrow=0; jrow < jh.high; jrow++) {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      rp = ljpeg_row (jrow, &jh);
      for (jcol=0; jcol < jwide; jcol++) {
	adobe_copy_pixel (trow+row, tcol+col, &rp);
	if (++col >= tile_width || col >= raw_width)
	  row += 1 + (col = 0);        // wrap to next row within the tile
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
    } catch (...) {
      ljpeg_end (&jh);
      throw ;
    }
#endif
    fseek (ifp, save+4, SEEK_SET);     // back to the tile-offset table
    if ((tcol += tile_width) >= raw_width)
      trow += tile_length + (tcol = 0);
    ljpeg_end (&jh);
  }
}
/* Load an uncompressed DNG: rows of packed samples, either 16 bits
   (read directly) or an arbitrary bit depth unpacked via getbits(). */
void CLASS packed_dng_load_raw()
{
  ushort *pixel, *rp;
  int row, col;
  pixel = (ushort *) calloc (raw_width, tiff_samples*sizeof *pixel);
  merror (pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (tiff_bps == 16)
      read_shorts (pixel, raw_width * tiff_samples);
    else {
      getbits(-1);   // reset the bit reader at each row boundary
      for (col=0; col < raw_width * tiff_samples; col++)
	pixel[col] = getbits(tiff_bps);
    }
    for (rp=pixel, col=0; col < raw_width; col++)
      adobe_copy_pixel (row, col, &rp);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    free (pixel);
    throw ;
  }
#endif
  free (pixel);
}
/* Load Pentax PEF compressed raw: build a Huffman table from the
   code spec stored at meta_offset, then decode per-pixel differences
   with two vertical predictors (one per column parity). */
void CLASS pentax_load_raw()
{
  ushort bit[2][15], huff[4097];
  int dep, row, col, diff, c, i;
  ushort vpred[2][2] = {{0,0},{0,0}}, hpred[2];
  fseek (ifp, meta_offset, SEEK_SET);
  dep = (get2() + 12) & 15;            // number of code entries
  fseek (ifp, 12, SEEK_CUR);
  FORC(dep) bit[0][c] = get2();        // code values
  FORC(dep) bit[1][c] = fgetc(ifp);    // code lengths
  /* Expand each code into the flat 12-bit lookup table. */
  FORC(dep)
    for (i=bit[0][c]; i <= ((bit[0][c]+(4096 >> bit[1][c])-1) & 4095); )
      huff[++i] = bit[1][c] << 8 | c;
  huff[0] = 12;                        // table indexed by 12 peeked bits
  fseek (ifp, data_offset, SEEK_SET);
  getbits(-1);
  for (row=0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col=0; col < raw_width; col++) {
	diff = ljpeg_diff (huff);
	/* First two columns seed the vertical predictors; the rest
	   accumulate horizontally per column parity. */
	if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
	else	   hpred[col & 1] += diff;
	RAW(row,col) = hpred[col & 1];
	if (hpred[col & 1] >> tiff_bps) derror();
      }
    }
}
#ifdef LIBRAW_LIBRARY_BUILD
/* Load a Nikon Coolscan (film scanner) NEF: plain interleaved RGB
   rows, 8 or 16 bits per sample, mapped through a gamma curve built
   from the user-selectable coolscan_nef_gamma parameter. */
void CLASS nikon_coolscan_load_raw()
{
  int bufsize = width*3*tiff_bps/8;     // bytes per scan line
  if(tiff_bps <= 8)
    gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,255);
  else
    gamma_curve(1.0/imgdata.params.coolscan_nef_gamma,0.,1,65535);
  fseek (ifp, data_offset, SEEK_SET);
  unsigned char *buf = (unsigned char*)malloc(bufsize);
  merror (buf, "nikon_coolscan_load_raw()");  // consistent OOM handling (was unchecked)
  unsigned short *ubuf = (unsigned short *)buf;
  for(int row = 0; row < raw_height; row++)
  {
    /* The read result was previously stored in an unused variable;
       flag short reads like the other loaders do instead. */
    if (fread (buf, 1, bufsize, ifp) < (size_t)bufsize) derror();
    unsigned short (*ip)[4] = (unsigned short (*)[4]) image + row*width;
    if(tiff_bps <= 8)
      for(int col=0; col<width;col++)
      {
	ip[col][0] = curve[buf[col*3]];
	ip[col][1] = curve[buf[col*3+1]];
	ip[col][2] = curve[buf[col*3+2]];
	ip[col][3]=0;
      }
    else
      for(int col=0; col<width;col++)
      {
	ip[col][0] = curve[ubuf[col*3]];
	ip[col][1] = curve[ubuf[col*3+1]];
	ip[col][2] = curve[ubuf[col*3+2]];
	ip[col][3]=0;
      }
  }
  free(buf);
}
#endif
/* Load Nikon compressed NEF (12- or 14-bit, lossy or lossless).
   Metadata at meta_offset selects one of six fixed Huffman trees,
   supplies the predictor seeds and the linearization curve, and for
   "lossy after split" files gives the row where the tree changes. */
void CLASS nikon_load_raw()
{
  static const uchar nikon_tree[][32] = {
    { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0,	/* 12-bit lossy */
      5,4,3,6,2,7,1,0,8,9,11,10,12 },
    { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0,	/* 12-bit lossy after split */
      0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
    { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,	/* 12-bit lossless */
      5,4,6,3,7,2,8,1,9,0,10,11,12 },
    { 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0,	/* 14-bit lossy */
      5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
    { 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0,	/* 14-bit lossy after split */
      8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
    { 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0,	/* 14-bit lossless */
      7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
  ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
  int i, min, max, step=0, tree=0, split=0, row, col, len, shl, diff;
  fseek (ifp, meta_offset, SEEK_SET);
  ver0 = fgetc(ifp);
  ver1 = fgetc(ifp);
  if (ver0 == 0x49 || ver1 == 0x58)
    fseek (ifp, 2110, SEEK_CUR);
  if (ver0 == 0x46) tree = 2;          // lossless variant
  if (tiff_bps == 14) tree += 3;       // 14-bit trees follow the 12-bit ones
  read_shorts (vpred[0], 4);           // predictor seeds
  max = 1 << tiff_bps & 0x7fff;
  if ((csize = get2()) > 1)
    step = max / (csize-1);
  if (ver0 == 0x44 && ver1 == 0x20 && step > 0) {
    /* Sparse curve: read every 'step'th point, then interpolate. */
    for (i=0; i < csize; i++)
      curve[i*step] = get2();
    for (i=0; i < max; i++)
      curve[i] = ( curve[i-i%step]*(step-i%step) +
		   curve[i-i%step+step]*(i%step) ) / step;
    fseek (ifp, meta_offset+562, SEEK_SET);
    split = get2();                    // row where the Huffman tree switches
  } else if (ver0 != 0x46 && csize <= 0x4001)
    read_shorts (curve, max=csize);
  while (curve[max-2] == curve[max-1]) max--;  // trim the flat tail
  huff = make_decoder (nikon_tree[tree]);
  fseek (ifp, data_offset, SEEK_SET);
  getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (min=row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (split && row == split) {
      free (huff);
      huff = make_decoder (nikon_tree[tree+1]);
      max += (min = 16) << 1;
    }
    for (col=0; col < raw_width; col++) {
      /* Leaf: low nibble = bit count, high nibble = left-shift. */
      i = gethuff(huff);
      len = i & 15;
      shl = i >> 4;
      diff = ((getbits(len-shl) << 1) + 1) << shl >> 1;
      if ((diff & (1 << (len-1))) == 0)
	diff -= (1 << len) - !shl;     // sign-extend
      if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
      else	   hpred[col & 1] += diff;
      if ((ushort)(hpred[col & 1] + min) >= max) derror();
      RAW(row,col) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    free (huff);
    throw;
  }
#endif
  free (huff);
}
/* Load Nikon's packed-YUV small-raw format: every six input bytes
   carry four 12-bit values (two luma samples for an even/odd column
   pair plus their shared chroma), converted here to RGB. */
void CLASS nikon_yuv_load_raw()
{
  int row, col, yuv[4], rgb[3], b, c;
  UINT64 packed = 0;
  for (row=0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < raw_width; col++) {
      b = col & 1;
      if (b == 0) {
        /* Even column: pull six bytes and split into four 12-bit
           fields; chroma (fields 2,3) is re-centered around zero. */
        packed = 0;
        for (c = 0; c < 6; c++)
          packed |= (UINT64) fgetc(ifp) << c*8;
        for (c = 0; c < 4; c++)
          yuv[c] = (packed >> c*12 & 0xfff) - (c >> 1 << 11);
      }
      rgb[0] = yuv[b] + 1.370705*yuv[3];
      rgb[1] = yuv[b] - 0.337633*yuv[2] - 0.698001*yuv[3];
      rgb[2] = yuv[b] + 1.732446*yuv[2];
      for (c = 0; c < 3; c++)
        image[row*width+col][c] = curve[LIM(rgb[c],0,0xfff)] / cam_mul[c];
    }
  }
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
/* Heuristic: in the last 2000 bytes of the file, a Coolpix 995 shows
   each of the bytes 0x00, 0x55, 0xaa, 0xff at least 200 times.
   Returns 1 for a match, 0 otherwise. */
int CLASS nikon_e995()
{
  int i, freq[256];
  const uchar markers[] = { 0x00, 0x55, 0xaa, 0xff };
  memset (freq, 0, sizeof freq);
  fseek (ifp, -2000, SEEK_END);
  i = 2000;
  while (i--)
    freq[fgetc(ifp)]++;
  for (i=0; i < 4; i++)
    if (freq[markers[i]] < 200)
      return 0;
  return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
/* Heuristic: a Coolpix 2100 file repeats a fixed bit signature in
   every 12-byte group over the first 12 KiB.  Returns 1 on match. */
int CLASS nikon_e2100()
{
  uchar blk[12];
  int i, sig;
  fseek (ifp, 0, SEEK_SET);
  for (i=0; i < 1024; i++) {
    fread (blk, 1, 12, ifp);
    sig = ((blk[2] & blk[4] & blk[7] & blk[9]) >> 4)
	  & blk[1] & blk[6] & blk[8] & blk[11] & 3;
    if (sig != 3)
      return 0;
  }
  return 1;
}
/* Distinguish four cameras that share a file layout by two 2-bit
   fields at fixed offsets; on a match, overwrite make/model. */
void CLASS nikon_3700()
{
  int key, i;
  uchar hdr[24];
  static const struct {
    int bits;
    char t_make[12], t_model[15];
  } table[] = {
    { 0x00, "Pentax", "Optio 33WR" },
    { 0x03, "Nikon", "E3200" },
    { 0x32, "Nikon", "E3700" },
    { 0x33, "Olympus", "C740UZ" } };
  fseek (ifp, 3072, SEEK_SET);
  fread (hdr, 1, 24, ifp);
  key = (hdr[8] & 3) << 4 | (hdr[20] & 3);
  for (i=0; i < (int)(sizeof table / sizeof *table); i++)
    if (table[i].bits == key) {
      strcpy (make, table[i].t_make );
      strcpy (model, table[i].t_model);
    }
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
/* Separate a Minolta DiMAGE Z2 from a Nikon E4300: the Z2 has more
   than 20 non-zero bytes among the last 424 bytes of the file. */
int CLASS minolta_z2()
{
  char tail[424];
  int i, nonzero = 0;
  fseek (ifp, -sizeof tail, SEEK_END);
  fread (tail, 1, sizeof tail, ifp);
  for (i = 0; i < (int)sizeof tail; i++)
    nonzero += (tail[i] != 0);
  return nonzero > 20;
}
//@end COMMON
void CLASS jpeg_thumb();
//@out COMMON
/* Copy an embedded 8-bit RGB thumbnail straight to the output as a
   binary PPM (P6): header first, then the raw pixel bytes. */
void CLASS ppm_thumb()
{
  char *pixbuf;
  thumb_length = thumb_width*thumb_height*3;
  pixbuf = (char *) malloc (thumb_length);
  merror (pixbuf, "ppm_thumb()");
  fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  fread (pixbuf, 1, thumb_length, ifp);
  fwrite (pixbuf, 1, thumb_length, ofp);
  free (pixbuf);
}
/* Emit a 16-bit embedded thumbnail as an 8-bit PPM: read the shorts,
   keep only each sample's high byte, and write a P6 stream. */
void CLASS ppm16_thumb()
{
  int i;
  char *buf;
  ushort *wide;
  thumb_length = thumb_width*thumb_height*3;
  buf = (char *) calloc (thumb_length, 2);
  merror (buf, "ppm16_thumb()");
  wide = (ushort *) buf;
  read_shorts (wide, thumb_length);
  /* Squash in place: buf[i] only overwrites data already consumed. */
  for (i=0; i < thumb_length; i++)
    buf[i] = wide[i] >> 8;
  fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  fwrite (buf, 1, thumb_length, ofp);
  free (buf);
}
/* Emit a planar embedded thumbnail as PGM/PPM: the data is stored as
   separate color planes, written out interleaved with a plane order
   chosen by thumb_misc. */
void CLASS layer_thumb()
{
  int i, c;
  char *thumb, map[][4] = { "012","102" };
  colors = thumb_misc >> 5 & 7;          // channel count from bits 5-7
  thumb_length = thumb_width*thumb_height;
  thumb = (char *) calloc (colors, thumb_length);
  merror (thumb, "layer_thumb()");
  fprintf (ofp, "P%d\n%d %d\n255\n",
	5 + (colors >> 1), thumb_width, thumb_height);  // P5 mono / P6 color
  fread (thumb, thumb_length, colors, ifp);
  /* map[thumb_misc >> 8] gives the output order of the stored planes. */
  for (i=0; i < thumb_length; i++)
    FORCC putc (thumb[i+thumb_length*(map[thumb_misc >> 8][c]-'0')], ofp);
  free (thumb);
}
/* Emit a Rollei thumbnail stored as 16-bit 5-6-5 pixels as a PPM:
   low 5 bits -> R, middle 6 -> G, top 5 -> B, scaled to 8 bits. */
void CLASS rollei_thumb()
{
  unsigned i;
  ushort *px, v;
  thumb_length = thumb_width * thumb_height;
  px = (ushort *) calloc (thumb_length, 2);
  merror (px, "rollei_thumb()");
  fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  read_shorts (px, thumb_length);
  for (i=0; i < thumb_length; i++) {
    v = px[i];
    putc (v << 3, ofp);
    putc (v >> 5 << 2, ofp);
    putc (v >> 11 << 3, ofp);
  }
  free (px);
}
/* Load Rollei packed raw: each 10-byte record yields five 16-bit
   values ("ten") plus, via the bits shaved off their high bytes,
   three more values ("six") that land later in the image.  The
   todo[] array pairs destination indices with raw values. */
void CLASS rollei_load_raw()
{
  uchar pixel[10];
  unsigned iten=0, isix, i, buffer=0, todo[16];
  isix = raw_width * raw_height * 5 / 8;   // where the "six" stream starts
  while (fread (pixel, 1, 10, ifp) == 10) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (i=0; i < 10; i+=2) {
      todo[i]   = iten++;
      todo[i+1] = pixel[i] << 8 | pixel[i+1];
      buffer    = pixel[i] >> 2 | buffer << 6;  // collect the spare high bits
    }
    for (   ; i < 16; i+=2) {
      todo[i]   = isix++;
      todo[i+1] = buffer >> (14-i)*5;           // carve three 10-bit values
    }
    for (i=0; i < 16; i+=2)
      raw_image[todo[i]] = (todo[i+1] & 0x3ff);
  }
  maximum = 0x3ff;
}
/* Bounds-checked read of the raw image; out-of-range coordinates
   yield 0 (used by the defect-repair code to sample neighbors). */
int CLASS raw (unsigned row, unsigned col)
{
  if (row >= raw_height || col >= raw_width)
    return 0;
  return RAW(row,col);
}
/* Apply a Phase One flat-field record: a coarse grid of gain values
   (head[4] x head[5] cell size) is read from the file and bilinearly
   interpolated across the image; 'nc' channels, float or 15-bit
   fixed-point depending on is_float.  Even mrow rows hold values,
   odd rows hold the per-step vertical increments. */
void CLASS phase_one_flat_field (int is_float, int nc)
{
  ushort head[8];
  unsigned wide, high, y, x, c, rend, cend, row, col;
  float *mrow, num, mult[4];
  read_shorts (head, 8);
  if (head[2] * head[3] * head[4] * head[5] == 0) return;
  wide = head[2] / head[4] + (head[2] % head[4] != 0);  // grid columns (ceil)
  high = head[3] / head[5] + (head[3] % head[5] != 0);  // grid rows (ceil)
  mrow = (float *) calloc (nc*wide, sizeof *mrow);
  merror (mrow, "phase_one_flat_field()");
  for (y=0; y < high; y++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (x=0; x < wide; x++)
      for (c=0; c < nc; c+=2) {
	num = is_float ? getreal(11) : get2()/32768.0;
	if (y==0) mrow[c*wide+x] = num;
	else mrow[(c+1)*wide+x] = (num - mrow[c*wide+x]) / head[5];
      }
    if (y==0) continue;
    rend = head[1] + y*head[5];
    for (row = rend-head[5];
	 row < raw_height && row < rend &&
	 row < head[1]+head[3]-head[5]; row++) {
      for (x=1; x < wide; x++) {
	/* Horizontal interpolation: value at left cell plus per-column step. */
	for (c=0; c < nc; c+=2) {
	  mult[c] = mrow[c*wide+x-1];
	  mult[c+1] = (mrow[c*wide+x] - mult[c]) / head[4];
	}
	cend = head[0] + x*head[4];
	for (col = cend-head[4];
	     col < raw_width &&
	     col < cend && col < head[0]+head[2]-head[4]; col++) {
	  c = nc > 2 ? FC(row-top_margin,col-left_margin) : 0;
	  if (!(c & 1)) {
	    c = RAW(row,col) * mult[c];
	    RAW(row,col) = LIM(c,0,65535);
	  }
	  for (c=0; c < nc; c+=2)
	    mult[c] += mult[c+1];
	}
      }
      /* Step the grid row values down by their vertical increments. */
      for (x=0; x < wide; x++)
	for (c=0; c < nc; c+=2)
	  mrow[c*wide+x] += mrow[(c+1)*wide+x];
    }
  }
  free (mrow);
}
/* Apply the corrections stored in a Phase One file's meta block:
   polynomial linearization curves, sensor-defect repair, flat fields,
   quadrant linearization/multipliers, and the tag-0x412 gain table.
   Returns 0 on success, LIBRAW_CANCELLED_BY_CALLBACK if a LibRaw
   cancellation callback fired.
   Fix: the normal path previously fell off the end of this non-void
   function (undefined behavior); it now returns 0 explicitly. */
int CLASS phase_one_correct()
{
  unsigned entries, tag, data, save, col, row, type;
  int len, i, j, k, cip, val[4], dev[4], sum, max;
  int head[9], diff, mindiff=INT_MAX, off_412=0;
  static const signed char dir[12][2] =
    { {-1,-1}, {-1,1}, {1,-1}, {1,1}, {-2,0}, {0,-2}, {0,2}, {2,0},
      {-2,-2}, {-2,2}, {2,-2}, {2,2} };
  float poly[8], num, cfrac, frac, mult[2], *yval[2];
  ushort *xval[2];
  int qmult_applied = 0, qlin_applied = 0;
  if (half_size || !meta_length) return 0;
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Phase One correction...\n"));
#endif
  fseek (ifp, meta_offset, SEEK_SET);
  order = get2();
  fseek (ifp, 6, SEEK_CUR);
  fseek (ifp, meta_offset+get4(), SEEK_SET);
  entries = get4(); get4();
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  while (entries--) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    tag = get4();
    len = get4();
    data = get4();
    save = ftell(ifp);
    fseek (ifp, meta_offset+data, SEEK_SET);
    if (tag == 0x419) {			/* Polynomial curve */
      for (get4(), i=0; i < 8; i++)
	poly[i] = getreal(11);
      poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
      for (i=0; i < 0x10000; i++) {
	num = (poly[5]*i + poly[3])*i + poly[1];
	curve[i] = LIM(num,0,65535);
      } goto apply;			/* apply to right half */
    } else if (tag == 0x41a) {		/* Polynomial curve */
      for (i=0; i < 4; i++)
	poly[i] = getreal(11);
      for (i=0; i < 0x10000; i++) {
	for (num=0, j=4; j--; )
	  num = num * i + poly[j];
	curve[i] = LIM(num+i,0,65535);
      } apply:				/* apply to whole image */
      for (row=0; row < raw_height; row++)
      {
#ifdef LIBRAW_LIBRARY_BUILD
	checkCancel();
#endif
	/* tag bit 0 selects the right half (from split_col) vs all. */
	for (col = (tag & 1)*ph1.split_col; col < raw_width; col++)
	  RAW(row,col) = curve[RAW(row,col)];
      }
    } else if (tag == 0x400) {		/* Sensor defects */
      while ((len -= 8) >= 0) {
	col = get2();
	row = get2();
	type = get2(); get2();
	if (col >= raw_width) continue;
	if (type == 131 || type == 137)	/* Bad column */
	  for (row=0; row < raw_height; row++)
	    /* Green pixels: replace with a robust average of the four
	       diagonal neighbors (drop the worst outlier). */
	    if (FC(row-top_margin,col-left_margin) == 1) {
	      for (sum=i=0; i < 4; i++)
		sum += val[i] = raw (row+dir[i][0], col+dir[i][1]);
	      for (max=i=0; i < 4; i++) {
		dev[i] = abs((val[i] << 2) - sum);
		if (dev[max] < dev[i]) max = i;
	      }
	      RAW(row,col) = (sum - val[max])/3.0 + 0.5;
	    } else {
	      for (sum=0, i=8; i < 12; i++)
		sum += raw (row+dir[i][0], col+dir[i][1]);
	      RAW(row,col) = 0.5 + sum * 0.0732233 +
		(raw(row,col-2) + raw(row,col+2)) * 0.3535534;
	    }
	else if (type == 129) {		/* Bad pixel */
	  if (row >= raw_height) continue;
	  j = (FC(row-top_margin,col-left_margin) != 1) * 4;
	  for (sum=0, i=j; i < j+8; i++)
	    sum += raw (row+dir[i][0], col+dir[i][1]);
	  RAW(row,col) = (sum + 4) >> 3;
	}
      }
    } else if (tag == 0x401) {		/* All-color flat fields */
      phase_one_flat_field (1, 2);
    } else if (tag == 0x416 || tag == 0x410) {
      phase_one_flat_field (0, 2);
    } else if (tag == 0x40b) {		/* Red+blue flat field */
      phase_one_flat_field (0, 4);
    } else if (tag == 0x412) {
      /* Remember the 0x412 record whose tag_21a is the closest match;
	 it is processed after the loop. */
      fseek (ifp, 36, SEEK_CUR);
      diff = abs (get2() - ph1.tag_21a);
      if (mindiff > diff) {
	mindiff = diff;
	off_412 = ftell(ifp) - 38;
      }
    } else if (tag == 0x41f && !qlin_applied) { /* Quadrant linearization */
      ushort lc[2][2][16], ref[16];
      int qr, qc;
      for (qr = 0; qr < 2; qr++)
	for (qc = 0; qc < 2; qc++)
	  for (i = 0; i < 16; i++)
	    lc[qr][qc][i] = (ushort)get4();
      for (i = 0; i < 16; i++) {
	int v = 0;
	for (qr = 0; qr < 2; qr++)
	  for (qc = 0; qc < 2; qc++)
	    v += lc[qr][qc][i];
	ref[i] = (v + 2) >> 2;		/* average of the four quadrants */
      }
      for (qr = 0; qr < 2; qr++) {
	for (qc = 0; qc < 2; qc++) {
	  int cx[19], cf[19];
	  for (i = 0; i < 16; i++) {
	    cx[1+i] = lc[qr][qc][i];
	    cf[1+i] = ref[i];
	  }
	  cx[0] = cf[0] = 0;
	  cx[17] = cf[17] = ((unsigned int)ref[15] * 65535) / lc[qr][qc][15];
	  cf[18] = cx[18] = 65535;
	  cubic_spline(cx, cf, 19);
	  for (row = (qr ? ph1.split_row : 0);
	       row < (qr ? raw_height : ph1.split_row); row++)
	  {
#ifdef LIBRAW_LIBRARY_BUILD
	    checkCancel();
#endif
	    for (col = (qc ? ph1.split_col : 0);
		 col < (qc ? raw_width : ph1.split_col); col++)
	      RAW(row,col) = curve[RAW(row,col)];
	  }
	}
      }
      qlin_applied = 1;
    } else if (tag == 0x41e && !qmult_applied) { /* Quadrant multipliers */
      float qmult[2][2] = { { 1, 1 }, { 1, 1 } };
      get4(); get4(); get4(); get4();
      qmult[0][0] = 1.0 + getreal(11);
      get4(); get4(); get4(); get4(); get4();
      qmult[0][1] = 1.0 + getreal(11);
      get4(); get4(); get4();
      qmult[1][0] = 1.0 + getreal(11);
      get4(); get4(); get4();
      qmult[1][1] = 1.0 + getreal(11);
      for (row=0; row < raw_height; row++)
      {
#ifdef LIBRAW_LIBRARY_BUILD
	checkCancel();
#endif
	for (col=0; col < raw_width; col++) {
	  i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row,col);
	  RAW(row,col) = LIM(i,0,65535);
	}
      }
      qmult_applied = 1;
    } else if (tag == 0x431 && !qmult_applied) { /* Quadrant combined */
      ushort lc[2][2][7], ref[7];
      int qr, qc;
      for (i = 0; i < 7; i++)
	ref[i] = (ushort)get4();
      for (qr = 0; qr < 2; qr++)
	for (qc = 0; qc < 2; qc++)
	  for (i = 0; i < 7; i++)
	    lc[qr][qc][i] = (ushort)get4();
      for (qr = 0; qr < 2; qr++) {
	for (qc = 0; qc < 2; qc++) {
	  int cx[9], cf[9];
	  for (i = 0; i < 7; i++) {
	    cx[1+i] = ref[i];
	    cf[1+i] = ((unsigned int)ref[i] * lc[qr][qc][i]) / 10000;
	  }
	  cx[0] = cf[0] = 0;
	  cx[8] = cf[8] = 65535;
	  cubic_spline(cx, cf, 9);
	  for (row = (qr ? ph1.split_row : 0);
	       row < (qr ? raw_height : ph1.split_row); row++)
	  {
#ifdef LIBRAW_LIBRARY_BUILD
	    checkCancel();
#endif
	    for (col = (qc ? ph1.split_col : 0);
		 col < (qc ? raw_width : ph1.split_col); col++)
	      RAW(row,col) = curve[RAW(row,col)];
	  }
	}
      }
      qmult_applied = 1;
      qlin_applied = 1;
    }
    fseek (ifp, save, SEEK_SET);
  }
  if (off_412) {
    /* Apply the selected 0x412 record: two piecewise-linear gain
       tables interpolated across the image. */
    fseek (ifp, off_412, SEEK_SET);
    for (i=0; i < 9; i++) head[i] = get4() & 0x7fff;
    yval[0] = (float *) calloc (head[1]*head[3] + head[2]*head[4], 6);
    merror (yval[0], "phase_one_correct()");
    yval[1] = (float  *) (yval[0] + head[1]*head[3]);
    xval[0] = (ushort *) (yval[1] + head[2]*head[4]);
    xval[1] = (ushort *) (xval[0] + head[1]*head[3]);
    get2();
    for (i=0; i < 2; i++)
      for (j=0; j < head[i+1]*head[i+3]; j++)
	yval[i][j] = getreal(11);
    for (i=0; i < 2; i++)
      for (j=0; j < head[i+1]*head[i+3]; j++)
	xval[i][j] = get2();
    for (row=0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col=0; col < raw_width; col++) {
	cfrac = (float) col * head[3] / raw_width;
	cfrac -= cip = cfrac;
	num = RAW(row,col) * 0.5;
	for (i=cip; i < cip+2; i++) {
	  for (k=j=0; j < head[1]; j++)
	    if (num < xval[0][k = head[1]*i+j]) break;
	  frac = (j == 0 || j == head[1]) ? 0 :
		(xval[0][k] - num) / (xval[0][k] - xval[0][k-1]);
	  mult[i-cip] = yval[0][k-1] * frac + yval[0][k] * (1-frac);
	}
	i = ((mult[0] * (1-cfrac) + mult[1] * cfrac) * row + num) * 2;
	RAW(row,col) = LIM(i,0,65535);
      }
    }
    free (yval[0]);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    return LIBRAW_CANCELLED_BY_CALLBACK;
  }
#endif
  return 0;   // normal completion (previously fell off the end: UB)
}
// Phase One uncompressed format: read the whole frame as 16-bit words, then
// (for scrambled variants, ph1.format != 0) undo a two-key XOR + bit-swap
// obfuscation applied to each pair of adjacent words.
void CLASS phase_one_load_raw()
{
  int a, b, i;
  ushort akey, bkey, t_mask;
  // Two 16-bit descrambling keys stored at ph1.key_off in the file.
  fseek (ifp, ph1.key_off, SEEK_SET);
  akey = get2();
  bkey = get2();
  // Mask selecting which bits are exchanged between the two words of a pair;
  // the pattern depends on the Phase One format variant.
  t_mask = ph1.format == 1 ? 0x5555:0x1354;
#ifdef LIBRAW_LIBRARY_BUILD
  // LibRaw keeps its own copies of the per-column / per-row black reference
  // tables so they survive past this loader.
  if (ph1.black_col || ph1.black_row )
    {
      imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
      merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw()");
      imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
      merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw()");
      if (ph1.black_col)
	{
	  fseek (ifp, ph1.black_col, SEEK_SET);
	  read_shorts ((ushort *)imgdata.rawdata.ph1_cblack[0], raw_height*2);
	}
      if (ph1.black_row)
	{
	  fseek (ifp, ph1.black_row, SEEK_SET);
	  read_shorts ((ushort *) imgdata.rawdata.ph1_rblack[0], raw_width*2);
	}
    }
#endif
  fseek (ifp, data_offset, SEEK_SET);
  read_shorts (raw_image, raw_width*raw_height);
  // Descramble in place: XOR each word with its key, then swap the masked
  // bits between the two words of the pair.
  if (ph1.format)
    for (i=0; i < raw_width*raw_height; i+=2) {
      a = raw_image[i+0] ^ akey;
      b = raw_image[i+1] ^ bkey;
      raw_image[i+0] = (a & t_mask) | (b & ~t_mask);
      raw_image[i+1] = (b & t_mask) | (a & ~t_mask);
    }
}
// Bit/Huffman reader shared by the Phase One and Hasselblad decoders.
// nbits == -1 resets the internal bit buffer; nbits == 0 is a no-op.
// With a huff table, the top bits of the buffer index the table: the entry's
// high byte is the code length to consume, the low byte the decoded symbol.
// Without a table, returns the next `nbits` bits verbatim.
unsigned CLASS ph1_bithuff (int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
// Thread-local buffer state when threading support is compiled in.
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
  static UINT64 bitbuf=0;
  static int vbits=0;
#endif
  unsigned c;
  if (nbits == -1)
    return bitbuf = vbits = 0;
  if (nbits == 0) return 0;
  // Refill 32 bits at a time from the file when the window runs low.
  if (vbits < nbits) {
    bitbuf = bitbuf << 32 | get4();
    vbits += 32;
  }
  // Peek the top `nbits` of the currently valid window.
  c = bitbuf << (64-vbits) >> (64-nbits);
  if (huff) {
    // Consume only the decoded code's length, not the full peek width.
    vbits -= huff[c] >> 8;
    return (uchar) huff[c];
  }
  vbits -= nbits;
  return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
// ph1_bits(n) reads n raw bits; ph1_huff(h) decodes one symbol using a
// make_decoder-style table whose first word is the peek width.
#define ph1_bits(n) ph1_bithuff(n,0)
#define ph1_huff(h) ph1_bithuff(*h,h+1)
// Phase One compressed format: per-row variable-length coding with two
// interleaved predictors (even/odd columns) and optional per-column /
// per-row black reference tables appended to the scratch buffer.
void CLASS phase_one_load_raw_c()
{
  static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
  int *offset, len[2], pred[2], row, col, i, j;
  ushort *pixel;
  short (*c_black)[2], (*r_black)[2];
#ifdef LIBRAW_LIBRARY_BUILD
  if(ph1.format == 6)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  // One scratch allocation carved into: pixel row | strip offsets |
  // column blacks | row blacks (hence raw_width*3 + raw_height*4 halfwords).
  pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2);
  merror (pixel, "phase_one_load_raw_c()");
  offset = (int *) (pixel + raw_width);
  fseek (ifp, strip_offset, SEEK_SET);
  for (row=0; row < raw_height; row++)
    offset[row] = get4();
  c_black = (short (*)[2]) (offset + raw_height);
  fseek (ifp, ph1.black_col, SEEK_SET);
  if (ph1.black_col)
    read_shorts ((ushort *) c_black[0], raw_height*2);
  r_black = c_black + raw_height;
  fseek (ifp, ph1.black_row, SEEK_SET);
  if (ph1.black_row)
    read_shorts ((ushort *) r_black[0], raw_width*2);
#ifdef LIBRAW_LIBRARY_BUILD
  // Copy black tables into LibRaw's own buffers (even if not read above,
  // the calloc'd zeros are copied).
  if (ph1.black_col || ph1.black_row )
  {
    imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
    merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw_c()");
    memmove(imgdata.rawdata.ph1_cblack,(ushort*)c_black[0],raw_height*2*sizeof(ushort));
    imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
    merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw_c()");
    memmove(imgdata.rawdata.ph1_rblack,(ushort*)r_black[0],raw_width*2*sizeof(ushort));
  }
#endif
  // Tone curve used for format 5 low values.
  for (i=0; i < 256; i++)
    curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    // Each row is an independent bitstream at its own strip offset.
    fseek (ifp, data_offset + offset[row], SEEK_SET);
    ph1_bits(-1);
    pred[0] = pred[1] = 0;
    for (col=0; col < raw_width; col++) {
      // Tail pixels (past the last multiple of 8) are stored as raw 14-bit;
      // otherwise code lengths are refreshed once per 8-pixel group.
      if (col >= (raw_width & -8))
	len[0] = len[1] = 14;
      else if ((col & 7) == 0)
	for (i=0; i < 2; i++) {
	  for (j=0; j < 5 && !ph1_bits(1); j++);
	  if (j--) len[i] = length[j*2 + ph1_bits(1)];
	}
      // len == 14 means a literal 16-bit value resets the predictor.
      if ((i = len[col & 1]) == 14)
	pixel[col] = pred[col & 1] = ph1_bits(16);
      else
	pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
      if (pred[col & 1] >> 16) derror();
      if (ph1.format == 5 && pixel[col] < 256)
	pixel[col] = curve[pixel[col]];
    }
    for (col=0; col < raw_width; col++) {
#ifndef LIBRAW_LIBRARY_BUILD
      // dcraw build: apply black subtraction here.
      i = (pixel[col] << 2) - ph1.t_black
	+ c_black[row][col >= ph1.split_col]
	+ r_black[col][row >= ph1.split_row];
      if (i > 0) RAW(row,col) = i;
#else
      // LibRaw build: keep unsubtracted data; blacks applied later.
      RAW(row,col) = pixel[col] << 2;
#endif
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  maximum = 0xfffc - ph1.t_black;
}
// Hasselblad compressed RAW: LJPEG-style Huffman code lengths with a
// rolling three-row history (back[]) feeding the predictor. Supports
// multi-sample (multi-shot) files; one selected shot goes to raw_image,
// and all samples can be merged into image[] when present.
void CLASS hasselblad_load_raw()
{
  struct jhead jh;
  int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c;
  unsigned upix, urow, ucol;
  ushort *ip;
  if (!ljpeg_start (&jh, 0)) return;
  order = 0x4949;
  ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  // back[4] holds the whole 3-row history; back[0..2] are row slices and
  // back[3] is rotated in below.
  back[4] = (int *) calloc (raw_width, 3*sizeof **back);
  merror (back[4], "hasselblad_load_raw()");
  FORC3 back[c] = back[4] + c*raw_width;
  cblack[6] >>= sh = tiff_samples > 1;
  shot = LIM(shot_select, 1, tiff_samples) - 1;
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    // Rotate the row history by one.
    FORC4 back[(c+3) & 3] = back[c];
    for (col=0; col < raw_width; col+=2) {
      // Decode a pair of differences per sample.
      for (s=0; s < tiff_samples*2; s+=2) {
	FORC(2) len[c] = ph1_huff(jh.huff[0]);
	FORC(2) {
	  diff[s+c] = ph1_bits(len[c]);
	  // Sign-extend per the LJPEG SSSS convention.
	  if ((diff[s+c] & (1 << (len[c]-1))) == 0)
	    diff[s+c] -= (1 << len[c]) - 1;
	  if (diff[s+c] == 65535) diff[s+c] = -32768;
	}
      }
      for (s=col; s < col+2; s++) {
	pred = 0x8000 + load_flags;
	if (col) pred = back[2][s-2];
	// psv 11 adds a vertical-gradient correction term.
	if (col && row > 1) switch (jh.psv) {
	  case 11: pred += back[0][s]/2 - back[0][s-2]/2; break;
	}
	f = (row & 1)*3 ^ ((col+s) & 1);
	FORC (tiff_samples) {
	  pred += diff[(s & 1)*tiff_samples+c];
	  upix = pred >> sh & 0xffff;
	  if (raw_image && c == shot)
	    RAW(row,s) = upix;
	  if (image) {
	    urow = row-top_margin + (c & 1);
	    ucol = col-left_margin - ((c >> 1) & 1);
	    ip = &image[urow*width+ucol][f];
	    // Samples past the fourth are averaged into the stored value.
	    if (urow < height && ucol < width)
	      *ip = c < 4 ? upix : (*ip + upix) >> 1;
	  }
	}
	back[2][s] = pred;
      }
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...){
    free (back[4]);
    ljpeg_end (&jh);
    throw;
  }
#endif
  free (back[4]);
  ljpeg_end (&jh);
  if (image) mix_green = 1;
}
// Leaf HDR format: tiled, one sample plane at a time. For filtered (CFA)
// files the rows are read directly into raw_image (pixel aliases it, so it
// must NOT be freed in that case); for unfiltered files a scratch row is
// demosaiced into image[].
void CLASS leaf_hdr_load_raw()
{
  ushort *pixel=0;
  unsigned tile=0, r, c, row, col;
  if (!filters) {
    pixel = (ushort *) calloc (raw_width, sizeof *pixel);
    merror (pixel, "leaf_hdr_load_raw()");
  }
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  FORC(tiff_samples)
    for (r=0; r < raw_height; r++) {
#ifdef LIBRAW_LIBRARY_BUILD
	checkCancel();
#endif
      // Each tile starts at an offset stored in a table at data_offset.
      if (r % tile_length == 0) {
	fseek (ifp, data_offset + 4*tile++, SEEK_SET);
	fseek (ifp, get4(), SEEK_SET);
      }
      // With a CFA only the selected shot's plane is kept.
      if (filters && c != shot_select) continue;
      if (filters) pixel = raw_image + r*raw_width;
      read_shorts (pixel, raw_width);
      if (!filters && (row = r - top_margin) < height)
	for (col=0; col < width; col++)
	  image[row*width+col][c] = pixel[col+left_margin];
    }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    if(!filters) free(pixel);
    throw;
  }
#endif
  if (!filters) {
    maximum = 0xffff;
    raw_color = 1;
    free (pixel);
  }
}
// Plain unpacked 16-bit data: one short per pixel for the whole frame.
// After discarding `load_flags` padding bits per pixel, any in-frame value
// that still exceeds `maximum`'s bit width is flagged as a data error.
void CLASS unpacked_load_raw()
{
  int r, c, hibits = 0;
  // Smallest bit count such that 1 << hibits >= maximum.
  do { } while (1 << ++hibits < maximum);
  read_shorts (raw_image, raw_width*raw_height);
  for (r = 0; r < raw_height; r++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (c = 0; c < raw_width; c++)
    {
      // The shift is applied to every pixel, inside or outside the margins.
      RAW(r,c) >>= load_flags;
      if ((RAW(r,c) >> hibits) &&
          (unsigned) (r - top_margin) < height &&
          (unsigned) (c - left_margin) < width)
        derror();
    }
  }
}
// Sinar four-shot backs: four sub-frames, each offset by half a pixel.
// In raw (bayer) mode, just load the selected shot unpacked; otherwise
// interleave all four shots into the full-color image[] array.
void CLASS sinar_4shot_load_raw()
{
  ushort *pixel;
  unsigned shot, row, col, r, c;
  if (raw_image) {
    // Single-shot path: seek through the offset table to the chosen shot.
    shot = LIM (shot_select, 1, 4) - 1;
    fseek (ifp, data_offset + shot*4, SEEK_SET);
    fseek (ifp, get4(), SEEK_SET);
    unpacked_load_raw();
    return;
  }
  pixel = (ushort *) calloc (raw_width, sizeof *pixel);
  merror (pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (shot=0; shot < 4; shot++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    fseek (ifp, data_offset + shot*4, SEEK_SET);
    fseek (ifp, get4(), SEEK_SET);
    for (row=0; row < raw_height; row++) {
      read_shorts (pixel, raw_width);
      // Shots 2/3 are shifted one row up; odd shots one column left.
      if ((r = row-top_margin - (shot >> 1 & 1)) >= height) continue;
      for (col=0; col < raw_width; col++) {
	if ((c = col-left_margin - (shot & 1)) >= width) continue;
	image[r*width+c][(row & 1)*3 ^ (~col & 1)] = pixel[col];
      }
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    free(pixel);
    throw;
  }
#endif
  free (pixel);
  mix_green = 1;
}
// Imacon full-color (non-CFA) data: three 16-bit samples per pixel,
// stored row by row. The LibRaw build reads a whole row at once into a
// scratch buffer; the dcraw build reads pixel by pixel.
void CLASS imacon_full_load_raw()
{
  int row, col;
  if (!image) return;
#ifdef LIBRAW_LIBRARY_BUILD
  unsigned short *buf = (unsigned short *)malloc(width*3*sizeof(unsigned short));
  merror(buf,"imacon_full_load_raw");
#endif
  for (row=0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
      read_shorts(buf,width*3);
      unsigned short (*rowp)[4] = &image[row*width];
      // Expand packed RGB triplets into image[]'s 4-channel layout.
      for (col=0; col < width; col++)
	{
	  rowp[col][0]=buf[col*3];
	  rowp[col][1]=buf[col*3+1];
	  rowp[col][2]=buf[col*3+2];
	  rowp[col][3]=0;
	}
#else
      for (col=0; col < width; col++)
	read_shorts (image[row*width+col], 3);
#endif
    }
#ifdef LIBRAW_LIBRARY_BUILD
  free(buf);
#endif
}
// Generic packed bit-stream loader for many cameras. Behavior is driven by
// load_flags bit fields: bit 0 = padded rows + parity bytes, bit 1/2 =
// split-half row interleave with a mid-stream reseek, bits 3-4 = read
// granularity (8/16/24/32 bits), bit 6 = swap column pairs, bit 7 = pad
// row stride to even bytes.
void CLASS packed_load_raw()
{
  int vbits=0, bwide, rbits, bite, half, irow, row, col, val, i;
  UINT64 bitbuf=0;
  bwide = raw_width * tiff_bps / 8;
  bwide += bwide & load_flags >> 7;
  // Leftover (padding) bits at the end of each row.
  rbits = bwide * 8 - raw_width * tiff_bps;
  if (load_flags & 1) bwide = bwide * 16 / 15;
  bite = 8 + (load_flags & 24);
  half = (raw_height+1) >> 1;
  for (irow=0; irow < raw_height; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    row = irow;
    // Interleaved halves: remap irow and reseek once when crossing into
    // the second half of the stream.
    if (load_flags & 2 &&
	(row = irow % half * 2 + irow / half) == 1 &&
	load_flags & 4) {
      if (vbits=0, tiff_compress)
	fseek (ifp, data_offset - (-half*bwide & -2048), SEEK_SET);
      else {
	fseek (ifp, 0, SEEK_END);
	fseek (ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
      }
    }
    for (col=0; col < raw_width; col++) {
      // Refill the 64-bit buffer `bite` bits at a time (little-endian).
      for (vbits -= tiff_bps; vbits < 0; vbits += bite) {
	bitbuf <<= bite;
	for (i=0; i < bite; i+=8)
	  bitbuf |= (unsigned) (fgetc(ifp) << i);
      }
      val = bitbuf << (64-tiff_bps-vbits) >> (64-tiff_bps);
      RAW(row,col ^ (load_flags >> 6 & 1)) = val;
      // Padded format: every 10th column is followed by a parity byte
      // which must be zero inside the visible area.
      if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) &&
	  row < height+top_margin && col < width+left_margin) derror();
    }
    vbits -= rbits;
  }
}
// Nokia/OmniVision RAW10: 5 bytes pack four 10-bit pixels (four high bytes
// plus one byte of 2-bit remainders). Little-endian files have each 4-byte
// group byte-reversed (rev = 3). For OmniVision sensors, a diagonal
// correlation test on two center rows detects the CFA orientation.
void CLASS nokia_load_raw()
{
  uchar *data, *dp;
  int rev, dwide, row, col, c;
  double sum[]={0,0};
  rev = 3 * (order == 0x4949);
  dwide = (raw_width * 5 + 1) / 4;
  // Double-length buffer: raw bytes land in the top half, the (possibly
  // byte-swapped) working copy in the bottom half.
  data = (uchar *) malloc (dwide*2);
  merror (data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread (data+dwide, 1, dwide, ifp) < dwide) derror();
    FORC(dwide) data[c] = data[dwide+(c ^ rev)];
    for (dp=data, col=0; col < raw_width; dp+=5, col+=4)
      FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...){
    free (data);
    throw;
  }
#endif
  free (data);
  maximum = 0x3ff;
  if (strcmp(make,"OmniVision")) return;
  // Compare diagonal neighbor differences on two middle rows: whichever
  // diagonal correlates better determines the green channel placement.
  row = raw_height/2;
  FORC(width-1) {
    sum[ c & 1] += SQR(RAW(row,c)-RAW(row+1,c+1));
    sum[~c & 1] += SQR(RAW(row+1,c)-RAW(row,c+1));
  }
  if (sum[1] > sum[0]) filters = 0x4b4b4b4b;
}
// Android "tight" RAW10: every 5-byte group packs four 10-bit pixels —
// four high bytes followed by one byte carrying the four 2-bit remainders.
void CLASS android_tight_load_raw()
{
  int row, col, c;
  // Row stride rounded up to a 32-byte boundary.
  int rowbytes = -(-5*raw_width >> 5) << 3;
  uchar *rowbuf = (uchar *) malloc (rowbytes);
  merror (rowbuf, "android_tight_load_raw()");
  for (row=0; row < raw_height; row++) {
    if (fread (rowbuf, 1, rowbytes, ifp) < rowbytes) derror();
    uchar *dp = rowbuf;
    for (col=0; col < raw_width; col+=4, dp+=5)
      FORC4 RAW(row,col+c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
  }
  free (rowbuf);
}
// Android "loose" RAW10: each 8-byte group carries six 10-bit pixels inside
// a 64-bit word assembled with reversed byte order.
void CLASS android_loose_load_raw()
{
  int row, col, c;
  UINT64 pack=0;
  // 8 bytes per 6 pixels, rounded up to whole groups.
  int rowbytes = (raw_width+5)/6 << 3;
  uchar *rowbuf = (uchar *) malloc (rowbytes);
  merror (rowbuf, "android_loose_load_raw()");
  for (row=0; row < raw_height; row++) {
    if (fread (rowbuf, 1, rowbytes, ifp) < rowbytes) derror();
    uchar *dp = rowbuf;
    for (col=0; col < raw_width; col+=6, dp+=8) {
      // Build the 64-bit group byte-reversed, then peel 10 bits per pixel.
      FORC(8) pack = (pack << 8) | dp[c^7];
      FORC(6) RAW(row,col+c) = (pack >> c*10) & 0x3ff;
    }
  }
  free (rowbuf);
}
// Canon RMF format: three 10-bit pixels per 32-bit word, with output
// coordinates rotated left by 4 columns (wrapping moves the pixel two rows
// up, wrapping again from the top). The LibRaw build reads a whole row of
// words at once; the dcraw build reads one word at a time via get4().
void CLASS canon_rmf_load_raw()
{
  int row, col, bits, orow, ocol, c;
#ifdef LIBRAW_LIBRARY_BUILD
  int *words = (int*)malloc(sizeof(int)*(raw_width/3+1));
  merror(words,"canon_rmf_load_raw");
#endif
  for (row=0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
      fread(words,sizeof(int),raw_width/3,ifp);
      for (col=0; col < raw_width-2; col+=3)
	{
	  bits = words[col/3];
	  FORC3 {
	    orow = row;
	    // Shift output left by 4; wrap to the previous-but-one row.
	    if ((ocol = col+c-4) < 0)
	      {
		ocol += raw_width;
		if ((orow -= 2) < 0)
		  orow += raw_height;
	      }
	    RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
	  }
	}
#else
      for (col=0; col < raw_width-2; col+=3) {
	bits = get4();
	FORC3 {
	  orow = row;
	  if ((ocol = col+c-4) < 0) {
	    ocol += raw_width;
	    if ((orow -= 2) < 0)
	      orow += raw_height;
	  }
	  RAW(orow,ocol) = curve[bits >> (10*c+2) & 0x3ff];
	}
      }
#endif
    }
#ifdef LIBRAW_LIBRARY_BUILD
  free(words);
#endif
  maximum = curve[0x3ff];
}
// Panasonic bit reader over a 16 KiB block buffer. The stream is read
// backwards within each block: vbits counts down and the byte index is
// XORed with 0x3ff0. load_flags gives the split point at which the block
// is rotated on read. nbits == 0 resets the reader.
unsigned CLASS pana_bits (int nbits)
{
#ifndef LIBRAW_NOTHREADS
// Thread-local state when threading support is compiled in.
#define buf tls->pana_bits.buf
#define vbits tls->pana_bits.vbits
#else
  static uchar buf[0x4000];
  static int vbits;
#endif
  int byte;
  if (!nbits) return vbits=0;
  // Refill the whole 16 KiB block, rotated by load_flags bytes.
  if (!vbits) {
    fread (buf+load_flags, 1, 0x4000-load_flags, ifp);
    fread (buf, 1, load_flags, ifp);
  }
  vbits = (vbits - nbits) & 0x1ffff;
  byte = vbits >> 3 ^ 0x3ff0;
  return (buf[byte] | buf[byte+1] << 8) >> (vbits & 7) & ~((~0u) << nbits);
#ifndef LIBRAW_NOTHREADS
#undef buf
#undef vbits
#endif
}
// Panasonic compressed RAW: 14-pixel groups, two interleaved predictors
// (even/odd columns) with a shared shift factor updated every third pixel.
// Values above 4098 inside the visible width are flagged as data errors.
void CLASS panasonic_load_raw()
{
  int row, col, i, j, sh=0, pred[2], nonz[2];
  pana_bits(0);
  for (row=0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
    for (col=0; col < raw_width; col++) {
      // New 14-pixel group: reset both predictors.
      if ((i = col % 14) == 0)
	pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
      if (i % 3 == 2) sh = 4 >> (3 - pana_bits(2));
      if (nonz[i & 1]) {
	if ((j = pana_bits(8))) {
	  // Refine the predictor with an 8-bit delta scaled by sh.
	  if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
	    pred[i & 1] &= ~((~0u) << sh);
	  pred[i & 1] += j << sh;
	}
      } else if ((nonz[i & 1] = pana_bits(8)) || i > 11)
	// Literal 12-bit value: 8 high bits then 4 low bits.
	pred[i & 1] = nonz[i & 1] << 4 | pana_bits(4);
      if ((RAW(row,col) = pred[col & 1]) > 4098 && col < width) derror();
    }
  }
}
// Olympus compressed RAW: per-column-parity carry state (acarry) drives an
// adaptive bit-length code; the spatial predictor is a gradient-adjusted
// choice among the west, north and northwest neighbors.
void CLASS olympus_load_raw()
{
  ushort huff[4096];
  int row, col, nbits, sign, low, high, i, c, w, n, nw;
  int acarry[2][3], *carry, pred, diff;
  // Build a fixed Huffman table: code i+1 bits long decodes to value i.
  huff[n=0] = 0xc0c;
  for (i=12; i--; )
    FORC(2048 >> i) huff[++n] = (i+1) << 8 | i;
  fseek (ifp, 7, SEEK_CUR);
  getbits(-1);
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    memset (acarry, 0, sizeof acarry);
    for (col=0; col < raw_width; col++) {
      carry = acarry[col & 1];
      i = 2 * (carry[2] < 3);
      // nbits adapts to the magnitude of the previous value for this parity.
      for (nbits=2+i; (ushort) carry[0] >> (nbits+i); nbits++);
      low = (sign = getbits(3)) & 3;
      sign = sign << 29 >> 31;
      if ((high = getbithuff(12,huff)) == 12)
	high = getbits(16-nbits) >> 1;
      carry[0] = (high << nbits) | getbits(nbits);
      diff = (carry[0] ^ sign) + carry[1];
      carry[1] = (diff*3 + carry[1]) >> 5;
      carry[2] = carry[0] > 16 ? 0 : carry[2]+1;
      if (col >= width) continue;
      // Predictor: edges use the single available neighbor; interior picks
      // between gradient (w+n-nw), average, or the flatter neighbor.
      if (row < 2 && col < 2) pred = 0;
      else if (row < 2) pred = RAW(row,col-2);
      else if (col < 2) pred = RAW(row-2,col);
      else {
	w  = RAW(row,col-2);
	n  = RAW(row-2,col);
	nw = RAW(row-2,col-2);
	if ((w < nw && nw < n) || (n < nw && nw < w)) {
	  if (ABS(w-nw) > 32 || ABS(n-nw) > 32)
	    pred = w + n - nw;
	  else pred = (w + n) >> 1;
	} else pred = ABS(w-nw) > ABS(n-nw) ? w : n;
      }
      if ((RAW(row,col) = pred + ((diff << 2) | low)) >> 12) derror();
    }
  }
}
// Minolta RD-175: 1481 records of 768 bytes, each mapping to one output row
// via a box/interleave scheme, with a few special-case records near the end.
// Odd boxes get horizontally interpolated; stored bytes are doubled to
// 9-bit output values.
void CLASS minolta_rd175_load_raw()
{
  uchar pixel[768];
  unsigned irow, box, row, col;
  for (irow=0; irow < 1481; irow++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread (pixel, 1, 768, ifp) < 768) derror();
    box = irow / 82;
    row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box-12)*2);
    // Records 1476-1480 map to fixed rows (1477/1479 are skipped).
    switch (irow) {
      case 1477: case 1479: continue;
      case 1476: row = 984; break;
      case 1480: row = 985; break;
      case 1478: row = 985; box = 1;
    }
    if ((box < 12) && (box & 1)) {
      // Odd box: alternate rows per column; interpolate every other pixel.
      for (col=0; col < 1533; col++, row ^= 1)
	if (col != 1) RAW(row,col) = (col+1) & 2 ?
		pixel[col/2-1] + pixel[col/2+1] : pixel[col/2] << 1;
      RAW(row,1)    = pixel[1]   << 1;
      RAW(row,1533) = pixel[765] << 1;
    } else
      for (col=row & 1; col < 1534; col+=2)
	RAW(row,col) = pixel[col/2] << 1;
  }
  maximum = 0xff << 1;
}
// Apple QuickTake 100: three-pass decode into a fixed 484x644 scratch frame
// (2-pixel border on each side), then a lookup through t_curve into 10-bit
// output. Pass 1 predicts green-position pixels, pass 2 red/blue positions
// with an adaptive step table, pass 3 sharpens, then the curve is applied.
void CLASS quicktake_100_load_raw()
{
  uchar pixel[484][644];
  static const short gstep[16] =
  { -89,-60,-44,-32,-22,-15,-8,-2,2,8,15,22,32,44,60,89 };
  static const short rstep[6][4] =
  { {  -3,-1,1,3  }, {  -5,-1,1,5  }, {  -8,-2,2,8  },
    { -13,-3,3,13 }, { -19,-4,4,19 }, { -28,-6,6,28 } };
  static const short t_curve[256] =
  { 0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,
    28,29,30,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,
    54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,
    79,80,81,82,83,84,86,88,90,92,94,97,99,101,103,105,107,110,112,114,116,
    118,120,123,125,127,129,131,134,136,138,140,142,144,147,149,151,153,155,
    158,160,162,164,166,168,171,173,175,177,179,181,184,186,188,190,192,195,
    197,199,201,203,205,208,210,212,214,216,218,221,223,226,230,235,239,244,
    248,252,257,261,265,270,274,278,283,287,291,296,300,305,309,313,318,322,
    326,331,335,339,344,348,352,357,361,365,370,374,379,383,387,392,396,400,
    405,409,413,418,422,426,431,435,440,444,448,453,457,461,466,470,474,479,
    483,487,492,496,500,508,519,531,542,553,564,575,587,598,609,620,631,643,
    654,665,676,687,698,710,721,732,743,754,766,777,788,799,810,822,833,844,
    855,866,878,889,900,911,922,933,945,956,967,978,989,1001,1012,1023 };
  int rb, row, col, sharp, val=0;
#ifdef LIBRAW_LIBRARY_BUILD
  // Bounds check: the decode passes index pixel[] up to [height+2][width+4],
  // so dimensions beyond the sensor's real 640x480 would overflow the
  // fixed-size stack buffer on a crafted file.
  if (width > 640 || height > 480)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  getbits(-1);
  memset (pixel, 0x80, sizeof pixel);
  // Pass 1: green-position pixels, predicted from three decoded neighbors
  // plus a 4-bit index into gstep.
  for (row=2; row < height+2; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=2+(row & 1); col < width+2; col+=2) {
      val = ((pixel[row-1][col-1] + 2*pixel[row-1][col+1] +
	      pixel[row][col-2]) >> 2) + gstep[getbits(4)];
      pixel[row][col] = val = LIM(val,0,255);
      // Seed the left/top borders from the first decoded values.
      if (col < 4)
	pixel[row][col-2] = pixel[row+1][~row & 1] = val;
      if (row == 2)
	pixel[row-1][col+1] = pixel[row-1][col+3] = val;
    }
    // Extend the last decoded value into the right border column.
    pixel[row][col] = val;
  }
  // Pass 2: red/blue positions; local contrast picks a step table row.
  for (rb=0; rb < 2; rb++)
    for (row=2+rb; row < height+2; row+=2)
      {
#ifdef LIBRAW_LIBRARY_BUILD
	checkCancel();
#endif
      for (col=3-(row & 1); col < width+2; col+=2) {
	if (row < 4 || col < 4) sharp = 2;
	else {
	  val = ABS(pixel[row-2][col] - pixel[row][col-2])
	      + ABS(pixel[row-2][col] - pixel[row-2][col-2])
	      + ABS(pixel[row][col-2] - pixel[row-2][col-2]);
	  sharp = val <  4 ? 0 : val <  8 ? 1 : val < 16 ? 2 :
		  val < 32 ? 3 : val < 48 ? 4 : 5;
	}
	val = ((pixel[row-2][col] + pixel[row][col-2]) >> 1)
	      + rstep[sharp][getbits(2)];
	pixel[row][col] = val = LIM(val,0,255);
	if (row < 4) pixel[row-2][col+2] = val;
	if (col < 4) pixel[row+2][col-2] = val;
      }
    }
  // Pass 3: unsharp the red/blue positions against horizontal neighbors.
  for (row=2; row < height+2; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
    for (col=3-(row & 1); col < width+2; col+=2) {
      val = ((pixel[row][col-1] + (pixel[row][col] << 2) +
	      pixel[row][col+1]) >> 1) - 0x100;
      pixel[row][col] = LIM(val,0,255);
    }
  }
  // Final: map the 8-bit scratch values through t_curve to 10-bit output.
  for (row=0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
    for (col=0; col < width; col++)
      RAW(row,col) = t_curve[pixel[row+2][col+2]];
  }
  maximum = 0x3ff;
}
// Helpers for kodak_radc_load_raw (undefined again after the function):
// radc_token decodes one signed symbol from Huffman table `tree`;
// FORYX iterates the 2x2 output cell; PREDICTOR averages decoded neighbors
// (weighted 1-2-1 for the luma plane c == 0).
#define radc_token(tree) ((signed char) getbithuff(8,huff[tree]))
#define FORYX for (y=1; y < 3; y++) for (x=col+1; x >= col; x--)
#define PREDICTOR (c ? (buf[c][y-1][x] + buf[c][y][x+1]) / 2 \
: (buf[c][y-1][x+1] + 2*buf[c][y-1][x] + buf[c][y][x+1]) / 4)
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
// The row-buffer copy below intentionally reads near array bounds; stop GCC
// from miscompiling it under aggressive loop optimization.
#  pragma GCC optimize("no-aggressive-loop-optimizations")
# endif
#endif
// Kodak RADC compression: 19 Huffman tables built from the packed `src`
// list decode per-4-row-band data for three planes (one luma, two chroma)
// into rolling 3-row buffers, followed by a checkerboard chroma
// reconstruction and a final curve lookup.
void CLASS kodak_radc_load_raw()
{
  // Pairs of (code length, value); table boundaries are implied by the
  // cumulative 256-entry budget per table.
  static const char src[] = {
    1,1, 2,3, 3,4, 4,2, 5,7, 6,5, 7,6, 7,8,
    1,0, 2,1, 3,3, 4,4, 5,2, 6,7, 7,6, 8,5, 8,8,
    2,1, 2,3, 3,0, 3,2, 3,4, 4,6, 5,5, 6,7, 6,8,
    2,0, 2,1, 2,3, 3,2, 4,4, 5,6, 6,7, 7,5, 7,8,
    2,1, 2,4, 3,0, 3,2, 3,3, 4,7, 5,5, 6,6, 6,8,
    2,3, 3,1, 3,2, 3,4, 3,5, 3,6, 4,7, 5,0, 5,8,
    2,3, 2,6, 3,0, 3,1, 4,4, 4,5, 4,7, 5,2, 5,8,
    2,4, 2,7, 3,3, 3,6, 4,1, 4,2, 4,5, 5,0, 5,8,
    2,6, 3,1, 3,3, 3,5, 3,7, 3,8, 4,0, 5,2, 5,4,
    2,0, 2,1, 3,2, 3,3, 4,4, 4,5, 5,6, 5,7, 4,8,
    1,0, 2,2, 2,-2,
    1,-3, 1,3,
    2,-17, 2,-5, 2,5, 2,17,
    2,-7, 2,2, 2,9, 2,18,
    2,-18, 2,-9, 2,-2, 2,7,
    2,-28, 2,28, 3,-49, 3,-9, 3,9, 4,49, 5,-79, 5,79,
    2,-1, 2,13, 2,26, 3,39, 4,-16, 5,55, 6,-37, 6,76,
    2,-26, 2,-13, 2,1, 3,-39, 4,16, 5,-55, 6,-76, 6,37
  };
  ushort huff[19][256];
  int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
  short last[3] = { 16,16,16 }, mul[3], buf[3][3][386];
  static const ushort pt[] =
  { 0,0, 1280,1344, 2320,3616, 3328,8000, 4095,16383, 65535,16383 };
  // Piecewise-linear output curve from the (in,out) breakpoints in pt[].
  for (i=2; i < 12; i+=2)
    for (c=pt[i-2]; c <= pt[i]; c++)
      curve[c] = (float)
	(c-pt[i-2]) / (pt[i]-pt[i-2]) * (pt[i+1]-pt[i-1]) + pt[i-1] + 0.5;
  for (s=i=0; i < sizeof src; i+=2)
    FORC(256 >> src[i])
      huff[0][s++] = src[i] << 8 | (uchar) src[i+1];
  // Table 18 decodes raw literal values; width depends on bits-per-pixel.
  s = kodak_cbpp == 243 ? 2 : 3;
  FORC(256) huff[18][c] = (8-s) << 8 | c >> s << s | 1 << (s-1);
  getbits(-1);
  for (i=0; i < sizeof(buf)/sizeof(short); i++)
    buf[0][0][i] = 2048;
  for (row=0; row < height; row+=4) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    // Per-band multipliers; rescale the carried row to the new gain.
    FORC3 mul[c] = getbits(6);
    FORC3 {
      val = ((0x1000000/last[c] + 0x7ff) >> 12) * mul[c];
      s = val > 65564 ? 10:12;
      x = ~((~0u) << (s-1));
      val <<= 12-s;
      for (i=0; i < sizeof(buf[0])/sizeof(short); i++)
	buf[c][0][i] = (buf[c][0][i] * val + x) >> s;
      last[c] = mul[c];
      // Luma (c == 0) decodes two row-pairs per band; chroma one.
      for (r=0; r <= !c; r++) {
	buf[c][1][width/2] = buf[c][2][width/2] = mul[c] << 7;
	for (tree=1, col=width/2; col > 0; ) {
	  if ((tree = radc_token(tree))) {
	    col -= 2;
	    // Token 8 = literal cell; otherwise delta + predictor.
	    if (tree == 8)
	      FORYX buf[c][y][x] = (uchar) radc_token(18) * mul[c];
	    else
	      FORYX buf[c][y][x] = radc_token(tree+10) * 16 + PREDICTOR;
	  } else
	    // Token 0 = run of predictor-only cells, optionally stepped.
	    do {
	      nreps = (col > 2) ? radc_token(9) + 1 : 1;
	      for (rep=0; rep < 8 && rep < nreps && col > 0; rep++) {
		col -= 2;
		FORYX buf[c][y][x] = PREDICTOR;
		if (rep & 1) {
		  step = radc_token(10) << 4;
		  FORYX buf[c][y][x] += step;
		}
	      }
	    } while (nreps == 9);
	}
	// Emit the two decoded rows, normalized by the band multiplier.
	for (y=0; y < 2; y++)
	  for (x=0; x < width/2; x++) {
	    val = (buf[c][y+1][x] << 4) / mul[c];
	    if (val < 0) val = 0;
	    if (c) RAW(row+y*2+c-1,x*2+2-c) = val;
	    else   RAW(row+r*2+y,x*2+y) = val;
	  }
	memcpy (buf[c][0]+!c, buf[c][2], sizeof buf[c][0]-2*!c);
      }
    }
    // Fill the checkerboard chroma positions from luma and neighbors.
    for (y=row; y < row+4; y++)
      for (x=0; x < width; x++)
	if ((x+y) & 1) {
	  r = x ? x-1 : x+1;
	  s = x+1 < width ? x+1 : x-1;
	  val = (RAW(y,x)-2048)*2 + (RAW(y,r)+RAW(y,s))/2;
	  if (val < 0) val = 0;
	  RAW(y,x) = val;
	}
  }
  for (i=0; i < height*width; i++)
    raw_image[i] = curve[raw_image[i]];
  maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
// No-op stubs when built without libjpeg.
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifndef LIBRAW_LIBRARY_BUILD
// libjpeg source callback (dcraw build only): refill from the RAW file,
// byte-swapping each 16-bit pair on the way in.
METHODDEF(boolean)
fill_input_buffer (j_decompress_ptr cinfo)
{
  static uchar jpeg_buffer[4096];
  size_t nbytes;
  nbytes = fread (jpeg_buffer, 1, 4096, ifp);
  swab (jpeg_buffer, jpeg_buffer, nbytes);
  cinfo->src->next_input_byte = jpeg_buffer;
  cinfo->src->bytes_in_buffer = nbytes;
  return TRUE;
}
// Kodak JPEG-compressed RAW (dcraw build): the embedded JPEG is half-height
// RGB; each decoded scanline expands into two bayer rows (greens doubled,
// red/blue summed from the pixel pair).
void CLASS kodak_jpeg_load_raw()
{
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  JSAMPARRAY buf;
  JSAMPLE (*pixel)[3];
  int row, col;
  cinfo.err = jpeg_std_error (&jerr);
  jpeg_create_decompress (&cinfo);
  jpeg_stdio_src (&cinfo, ifp);
  cinfo.src->fill_input_buffer = fill_input_buffer;
  jpeg_read_header (&cinfo, TRUE);
  jpeg_start_decompress (&cinfo);
  // Sanity-check the embedded JPEG against the expected sensor geometry.
  if ((cinfo.output_width      != width  ) ||
      (cinfo.output_height*2   != height ) ||
      (cinfo.output_components != 3      )) {
    fprintf (stderr,_("%s: incorrect JPEG dimensions\n"), ifname);
    jpeg_destroy_decompress (&cinfo);
    longjmp (failure, 3);
  }
  buf = (*cinfo.mem->alloc_sarray)
		((j_common_ptr) &cinfo, JPOOL_IMAGE, width*3, 1);
  while (cinfo.output_scanline < cinfo.output_height) {
    row = cinfo.output_scanline * 2;
    jpeg_read_scanlines (&cinfo, buf, 1);
    pixel = (JSAMPLE (*)[3]) buf[0];
    for (col=0; col < width; col+=2) {
      RAW(row+0,col+0) = pixel[col+0][1] << 1;
      RAW(row+1,col+1) = pixel[col+1][1] << 1;
      RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
      RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
    }
  }
  jpeg_finish_decompress (&cinfo);
  jpeg_destroy_decompress (&cinfo);
  maximum = 0xff << 1;
}
#else
// Minimal libjpeg error manager for the LibRaw build: convert libjpeg
// fatal errors into a LibRaw decode exception instead of calling exit().
struct jpegErrorManager {
  struct jpeg_error_mgr pub;
};
static void jpegErrorExit (j_common_ptr cinfo)
{
  jpegErrorManager* myerr = (jpegErrorManager*) cinfo->err;
  throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
// LibRaw's Kodak_jpeg_load_raw: same half-height-RGB-to-bayer expansion as
// the dcraw version above, but decoding from an in-memory (byte-swapped)
// copy of the data with exception-based cleanup instead of longjmp.
void CLASS kodak_jpeg_load_raw()
{
  if(data_size < 1)
    throw LIBRAW_EXCEPTION_DECODE_JPEG;
  int row, col;
  jpegErrorManager jerr;
  struct jpeg_decompress_struct cinfo;
  cinfo.err = jpeg_std_error(&jerr.pub);
  jerr.pub.error_exit = jpegErrorExit;
  unsigned char *jpg_buf  = (unsigned char *)malloc(data_size);
  merror(jpg_buf,"kodak_jpeg_load_raw");
  unsigned char *pixel_buf = (unsigned char*) malloc(width*3);
  jpeg_create_decompress (&cinfo);
  merror(pixel_buf,"kodak_jpeg_load_raw");
  fread(jpg_buf,data_size,1,ifp);
  // The JPEG stream is stored 16-bit byte-swapped in the file.
  swab ((char*)jpg_buf, (char*)jpg_buf, data_size);
  try
  {
    jpeg_mem_src(&cinfo, jpg_buf, data_size);
    int rc = jpeg_read_header(&cinfo, TRUE);
    if(rc!=1)
      throw LIBRAW_EXCEPTION_DECODE_JPEG;
    jpeg_start_decompress (&cinfo);
    // Geometry check: embedded JPEG is full width, half height, 3 channels.
    if ((cinfo.output_width      != width  ) ||
	(cinfo.output_height*2   != height ) ||
	(cinfo.output_components != 3      ))
      {
	throw LIBRAW_EXCEPTION_DECODE_JPEG;
      }
    unsigned char *buf[1];
    buf[0] = pixel_buf;
    while (cinfo.output_scanline < cinfo.output_height)
      {
	checkCancel();
	row = cinfo.output_scanline * 2;
	jpeg_read_scanlines (&cinfo, buf, 1);
	unsigned char (*pixel)[3] = (unsigned char (*)[3]) buf[0];
	// Expand each decoded RGB scanline into two bayer rows.
	for (col=0; col < width; col+=2) {
	  RAW(row+0,col+0) = pixel[col+0][1] << 1;
	  RAW(row+1,col+1) = pixel[col+1][1] << 1;
	  RAW(row+0,col+1) = pixel[col][0] + pixel[col+1][0];
	  RAW(row+1,col+0) = pixel[col][2] + pixel[col+1][2];
	}
      }
  }
  catch (...)
  {
    jpeg_finish_decompress (&cinfo);
    jpeg_destroy_decompress (&cinfo);
    free(jpg_buf);
    free(pixel_buf);
    throw;
  }
  jpeg_finish_decompress (&cinfo);
  jpeg_destroy_decompress (&cinfo);
  free(jpg_buf);
  free(pixel_buf);
  maximum = 0xff << 1;
}
#endif
#ifndef LIBRAW_LIBRARY_BUILD
// Forward declaration needed by lossy_dng_load_raw in the dcraw build.
void CLASS gamma_curve (double pwr, double ts, int mode, int imax);
#endif
// Lossy (JPEG-compressed) DNG: decompress each JPEG tile and map the 8-bit
// samples through per-channel curves, either polynomial MapPolynomial
// opcodes from the DNG OpcodeList (meta_offset) or a default sRGB-style
// gamma curve.
void CLASS lossy_dng_load_raw()
{
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  JSAMPARRAY buf;
  JSAMPLE (*pixel)[3];
  unsigned sorder=order, ntags, opcode, deg, i, j, c;
  unsigned save=data_offset-4, trow=0, tcol=0, row, col;
  ushort cur[3][256];
  double coeff[9], tot;
  if (meta_offset) {
    // Parse opcode list: only opcode 8 (MapPolynomial) is handled; its
    // coefficients build a per-channel 8-bit -> 16-bit lookup table.
    fseek (ifp, meta_offset, SEEK_SET);
    order = 0x4d4d;
    ntags = get4();
    while (ntags--) {
      opcode = get4(); get4(); get4();
      if (opcode != 8)
      { fseek (ifp, get4(), SEEK_CUR); continue; }
      fseek (ifp, 20, SEEK_CUR);
      if ((c = get4()) > 2) break;
      fseek (ifp, 12, SEEK_CUR);
      if ((deg = get4()) > 8) break;
      for (i=0; i <= deg && i < 9; i++)
	coeff[i] = getreal(12);
      for (i=0; i < 256; i++) {
	for (tot=j=0; j <= deg; j++)
	  tot += coeff[j] * pow(i/255.0, (int)j);
	cur[c][i] = tot*0xffff;
      }
    }
    order = sorder;
  } else {
    // No opcode list: fall back to the standard sRGB transfer curve.
    gamma_curve (1/2.4, 12.92, 1, 255);
    FORC3 memcpy (cur[c], curve, sizeof cur[0]);
  }
  cinfo.err = jpeg_std_error (&jerr);
  jpeg_create_decompress (&cinfo);
  // Walk the tile grid left-to-right, top-to-bottom.
  while (trow < raw_height) {
    fseek (ifp, save+=4, SEEK_SET);
    if (tile_length < INT_MAX)
      fseek (ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
    if(libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
      {
	jpeg_destroy_decompress(&cinfo);
	throw LIBRAW_EXCEPTION_DECODE_JPEG;
      }
#else
    jpeg_stdio_src (&cinfo, ifp);
#endif
    jpeg_read_header (&cinfo, TRUE);
    jpeg_start_decompress (&cinfo);
    buf = (*cinfo.mem->alloc_sarray)
	((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width*3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
    try {
#endif
    while (cinfo.output_scanline < cinfo.output_height &&
	(row = trow + cinfo.output_scanline) < height) {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      jpeg_read_scanlines (&cinfo, buf, 1);
      pixel = (JSAMPLE (*)[3]) buf[0];
      // Clip tiles that overhang the right/bottom image edges.
      for (col=0; col < cinfo.output_width && tcol+col < width; col++) {
	FORC3 image[row*width+tcol+col][c] = cur[c][pixel[col][c]];
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
    } catch(...) {
      jpeg_destroy_decompress (&cinfo);
      throw;
    }
#endif
    jpeg_abort_decompress (&cinfo);
    if ((tcol += tile_width) >= raw_width)
      trow += tile_length + (tcol = 0);
  }
  jpeg_destroy_decompress (&cinfo);
  maximum = 0xffff;
}
#endif
// Kodak DC120: each 848-byte record holds one row, circularly rotated by a
// per-row amount; undo the rotation while copying into raw_image.
void CLASS kodak_dc120_load_raw()
{
  static const int mul[4] = { 162, 192, 187,  92 };
  static const int add[4] = {   0, 636, 424, 212 };
  uchar rowbuf[848];
  int row, col;
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread (rowbuf, 1, 848, ifp) < 848) derror();
    // Rotation offset cycles with period 4 and grows linearly with row.
    int shift = row * mul[row & 3] + add[row & 3];
    for (col=0; col < width; col++)
      RAW(row,col) = (ushort) rowbuf[(col + shift) % 848];
  }
  maximum = 0xff;
}
// Plain 8-bit data: one byte per pixel, mapped through the current curve[].
void CLASS eight_bit_load_raw()
{
  uchar *pixel;
  unsigned row, col;
  pixel = (uchar *) calloc (raw_width, sizeof *pixel);
  merror (pixel, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread (pixel, 1, raw_width, ifp) < raw_width) derror();
    for (col=0; col < raw_width; col++)
      RAW(row,col) = curve[pixel[col]];
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  maximum = curve[0xff];
}
// Kodak C330: YCbCr 4:2:2-style rows (Y per pixel, Cb/Cr shared by each
// pixel pair) converted to RGB through curve[]. load_flags signals extra
// padding rows to skip every 32 rows.
void CLASS kodak_c330_load_raw()
{
  uchar *pixel;
  int row, col, y, cb, cr, rgb[3], c;
  pixel = (uchar *) calloc (raw_width, 2*sizeof *pixel);
  merror (pixel, "kodak_c330_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread (pixel, raw_width, 2, ifp) < 2) derror();
    if (load_flags && (row & 31) == 31)
      fseek (ifp, raw_width*32, SEEK_CUR);
    for (col=0; col < width; col++) {
      // Y at every pixel; Cb/Cr interleaved once per 2-pixel group.
      y  = pixel[col*2];
      cb = pixel[(col*2 & -4) | 1] - 128;
      cr = pixel[(col*2 & -4) | 3] - 128;
      rgb[1] = y - ((cb + cr + 2) >> 2);
      rgb[2] = rgb[1] + cb;
      rgb[0] = rgb[1] + cr;
      FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  maximum = curve[0xff];
}
// Kodak C603: YCbCr with chroma shared by 2x2 blocks — a 3-plane record is
// read once per row pair (two Y rows plus one Cb/Cr row), then converted to
// RGB through curve[].
void CLASS kodak_c603_load_raw()
{
  uchar *pixel;
  int row, col, y, cb, cr, rgb[3], c;
  pixel = (uchar *) calloc (raw_width, 3*sizeof *pixel);
  merror (pixel, "kodak_c603_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    // Refill the buffer only on even rows; odd rows reuse the second Y row.
    if (~row & 1)
      if (fread (pixel, raw_width, 3, ifp) < 3) derror();
    for (col=0; col < width; col++) {
      y  = pixel[width*2*(row & 1) + col];
      cb = pixel[width + (col & -2)]   - 128;
      cr = pixel[width + (col & -2)+1] - 128;
      rgb[1] = y - ((cb + cr + 2) >> 2);
      rgb[2] = rgb[1] + cb;
      rgb[0] = rgb[1] + cr;
      FORC3 image[row*width+col][c] = curve[LIM(rgb[c],0,255)];
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  maximum = curve[0xff];
}
// Kodak type-262: 32-row strips of Huffman-coded differences on a
// checkerboard; the predictor averages two earlier pixels chosen by the
// pixel's color (chess) parity, decoded into an 8-bit scratch buffer and
// mapped through curve[].
void CLASS kodak_262_load_raw()
{
  static const uchar kodak_tree[2][26] =
  { { 0,1,5,1,1,2,0,0,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 },
    { 0,3,1,1,1,1,1,2,0,0,0,0,0,0,0,0, 0,1,2,3,4,5,6,7,8,9 } };
  ushort *huff[2];
  uchar *pixel;
  int *strip, ns, c, row, col, chess, pi=0, pi1, pi2, pred, val;
  FORC(2) huff[c] = make_decoder (kodak_tree[c]);
  ns = (raw_height+63) >> 5;
  // One allocation: 32 rows of decoded bytes plus the strip offset table.
  pixel = (uchar *) malloc (raw_width*32 + ns*4);
  merror (pixel, "kodak_262_load_raw()");
  strip = (int *) (pixel + raw_width*32);
  order = 0x4d4d;
  FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    // New 32-row strip: reseek and restart the bitstream and buffer index.
    if ((row & 31) == 0) {
      fseek (ifp, strip[row >> 5], SEEK_SET);
      getbits(-1);
      pi = 0;
    }
    for (col=0; col < raw_width; col++) {
      chess = (row + col) & 1;
      // Candidate predictor positions: same-color neighbors to the left /
      // above, falling back to whichever is in range.
      pi1 = chess ? pi-2           : pi-raw_width-1;
      pi2 = chess ? pi-2*raw_width : pi-raw_width+1;
      if (col <= chess) pi1 = -1;
      if (pi1 < 0) pi1 = pi2;
      if (pi2 < 0) pi2 = pi1;
      if (pi1 < 0 && col > 1) pi1 = pi2 = pi-2;
      pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
      pixel[pi] = val = pred + ljpeg_diff (huff[chess]);
      if (val >> 8) derror();
      val = curve[pixel[pi++]];
      RAW(row,col) = val;
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  FORC(2) free (huff[c]);
}
int CLASS kodak_65000_decode (short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf=0;
int save, bits=0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i=0; i < bsize; i+=2) {
c = fgetc(ifp);
if ((blen[i ] = c & 15) > 12 ||
(blen[i+1] = c >> 4) > 12 ) {
fseek (ifp, save, SEEK_SET);
for (i=0; i < bsize; i+=8) {
read_shorts (raw, 6);
out[i ] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i+1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j=0; j < 6; j++)
out[i+2+j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4) {
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i=0; i < bsize; i++) {
len = blen[i];
if (bits < len) {
for (j=0; j < 32; j+=8)
bitbuf += (INT64) fgetc(ifp) << (bits+(j^8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16-len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len-1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
/* Kodak 65000 Bayer loader: rows are decoded in 256-pixel chunks.
   When kodak_65000_decode returns 0 the values are differences,
   accumulated into two predictors interleaved by column parity. */
void CLASS kodak_65000_load_raw()
{
  short buf[256];
  int row, col, len, pred[2], ret, i;
  for (row=0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < width; col+=256) {
      pred[0] = pred[1] = 0;
      len = MIN (256, width-col);
      ret = kodak_65000_decode (buf, len);
      for (i=0; i < len; i++)
	/* values must fit in 12 bits after curve lookup */
	if ((RAW(row,col+i) = curve[ret ? buf[i] :
		(pred[i & 1] += buf[i])]) >> 12) derror();
    }
  }
}
/* Kodak YCbCr loader: the stream carries 2x2 luma blocks with one
   shared (differential) Cb/Cr pair per block, decoded via
   kodak_65000_decode in 128-pixel chunks.  'bits' bounds the valid
   luma range (from load_flags when in 10..16, else 10). */
void CLASS kodak_ycbcr_load_raw()
{
  short buf[384], *bp;
  int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
  ushort *ip;
  if (!image) return;
  unsigned int bits = (load_flags && load_flags > 9 && load_flags < 17)?load_flags:10;
  for (row=0; row < height; row+=2)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < width; col+=128) {
      len = MIN (128, width-col);
      /* 6 shorts per 2x2 block: 4 luma diffs + Cb,Cr diffs */
      kodak_65000_decode (buf, len*3);
      y[0][1] = y[1][1] = cb = cr = 0;
      for (bp=buf, i=0; i < len; i+=2, bp+=2) {
	cb += bp[4];
	cr += bp[5];
	rgb[1] = -((cb + cr + 2) >> 2);
	rgb[2] = rgb[1] + cb;
	rgb[0] = rgb[1] + cr;
	for (j=0; j < 2; j++)
	  for (k=0; k < 2; k++) {
	    /* each luma predicts from its horizontal neighbor y[j][k^1] */
	    if ((y[j][k] = y[j][k^1] + *bp++) >> bits) derror();
	    ip = image[(row+j)*width + col+i+k];
	    FORC3 ip[c] = curve[LIM(y[j][k]+rgb[c], 0, 0xfff)];
	  }
      }
    }
  }
}
/* Kodak RGB loader: interleaved per-channel data in 256-pixel chunks.
   Differential values accumulate into rgb[]; absolute values (ret!=0)
   are stored directly.  The LibRaw load_flags==12 path skips the
   12-bit overflow check.  NOTE(review): 'ip' walks image[] linearly
   across the whole frame, 4 ushorts per pixel. */
void CLASS kodak_rgb_load_raw()
{
  short buf[768], *bp;
  int row, col, len, c, i, rgb[3],ret;
  ushort *ip=image[0];
  for (row=0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < width; col+=256) {
      len = MIN (256, width-col);
      ret = kodak_65000_decode (buf, len*3);
      memset (rgb, 0, sizeof rgb);
      for (bp=buf, i=0; i < len; i++, ip+=4)
#ifdef LIBRAW_LIBRARY_BUILD
	if(load_flags == 12)
	  {
	    FORC3 ip[c] = ret ? (*bp++) : (rgb[c] += *bp++);
	  }
	else
#endif
	FORC3 if ((ip[c] = ret ? (*bp++) : (rgb[c] += *bp++)) >> 12) derror();
    }
  }
}
/* Load an uncompressed Kodak thumbnail: the color count comes from the
   top bits of thumb_misc, the bit depth from its low 5 bits, and each
   pixel is read as 'colors' consecutive 16-bit samples. */
void CLASS kodak_thumb_load_raw()
{
  colors = thumb_misc >> 5;
  for (int r = 0; r < height; r++)
    for (int cl = 0; cl < width; cl++)
      read_shorts (image[r*width+cl], colors);
  maximum = (1 << (thumb_misc & 31)) - 1;
}
/* Sony stream cipher: when 'start' is set, seed a 128-word pad from
   'key' with a multiplicative congruential generator and a shift/XOR
   mixing pass; then XOR 'len' words of data with a lagged keystream
   (taps at p+1 and p+65).  With threads enabled, pad/p are redirected
   to thread-local storage via the macros below. */
void CLASS sony_decrypt (unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#endif
#else
  static unsigned pad[128], p;
#endif
  if (start) {
    for (p=0; p < 4; p++)
      pad[p] = key = key * 48828125 + 1;
    pad[3] = pad[3] << 1 | (pad[0]^pad[2]) >> 31;
    for (p=4; p < 127; p++)
      pad[p] = (pad[p-4]^pad[p-2]) << 1 | (pad[p-3]^pad[p-1]) >> 31;
    /* NOTE(review): only pad[0..126] are byte-swapped; pad[127] is left
       untouched.  This matches upstream dcraw, so it is preserved. */
    for (p=0; p < 127; p++)
      pad[p] = htonl(pad[p]);
  }
  while (len--)
  {
    *data++ ^= pad[p & 127] = pad[(p+1) & 127] ^ pad[(p+65) & 127];
    p++;
  }
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
/* Sony (DSC-F828 era) encrypted raw loader: the decryption key is
   itself stored encrypted in a 40-byte header block; each image row is
   read, decrypted, byte-swapped, and range-checked to 14 bits. */
void CLASS sony_load_raw()
{
  uchar head[40];
  ushort *pixel;
  unsigned i, key, row, col;
  fseek (ifp, 200896, SEEK_SET);
  fseek (ifp, (unsigned) fgetc(ifp)*4 - 1, SEEK_CUR);
  order = 0x4d4d;
  key = get4();
  fseek (ifp, 164600, SEEK_SET);
  fread (head, 1, 40, ifp);
  /* decrypt the header, then extract the real row key from bytes 22..25 */
  sony_decrypt ((unsigned int *) head, 10, 1, key);
  for (i=26; i-- > 22; )
    key = key << 8 | head[i];
  fseek (ifp, data_offset, SEEK_SET);
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    pixel = raw_image + row*raw_width;
    if (fread (pixel, 2, raw_width, ifp) < raw_width) derror();
    /* re-seed the cipher only on the first row (!row) */
    sony_decrypt ((unsigned int *) pixel, raw_width/2, !row, key);
    for (col=0; col < raw_width; col++)
      if ((pixel[col] = ntohs(pixel[col])) >> 14) derror();
  }
  maximum = 0x3ff0;
}
/* Sony ARW v1 loader: a Huffman table is expanded from the run-length
   spec in tab[] (high byte = repeat shift, low byte = code), then the
   image is decoded column-major with a single running DPCM sum. */
void CLASS sony_arw_load_raw()
{
  ushort huff[32770];
  static const ushort tab[18] =
  { 0xf11,0xf10,0xe0f,0xd0e,0xc0d,0xb0c,0xa0b,0x90a,0x809,
    0x708,0x607,0x506,0x405,0x304,0x303,0x300,0x202,0x201 };
  int i, c, n, col, row, sum=0;
  huff[0] = 15;
  for (n=i=0; i < 18; i++)
    FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i];
  getbits(-1);
  for (col = raw_width; col--; )
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* rows are visited 0,2,4,... then odd rows via the row==raw_height
       wrap back to 1 -- the bitstream interleaves fields this way */
    for (row=0; row < raw_height+1; row+=2) {
      if (row == raw_height) row = 1;
      if ((sum += ljpeg_diff(huff)) >> 12) derror();
      if (row < height) RAW(row,col) = sum;
    }
  }
}
/* Sony ARW v2 (lossy cRAW) loader: each 16-byte unit encodes 16 pixels
   of one Bayer color as an 11-bit max, 11-bit min, their 4-bit indices,
   and fourteen 7-bit deltas scaled by a shift 'sh' derived from the
   max-min span.  Under LibRaw, sony_arw2_options selects alternative
   reconstructions (base-only, delta-only, delta-to-value) used for
   posterization analysis; the #else branch is stock dcraw. */
void CLASS sony_arw2_load_raw()
{
  uchar *data, *dp;
  ushort pix[16];
  int row, col, val, max, min, imax, imin, sh, bit, i;
  data = (uchar *) malloc (raw_width+1);
  merror (data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    fread (data, 1, raw_width, ifp);
    /* col advances inside the block-store loops below, not here */
    for (dp=data, col=0; col < raw_width-30; dp+=16) {
      max = 0x7ff & (val = sget4(dp));
      min = 0x7ff & val >> 11;
      imax = 0x0f & val >> 22;
      imin = 0x0f & val >> 26;
      /* smallest shift such that the 7-bit deltas can span max-min */
      for (sh=0; sh < 4 && 0x80 << sh <= max-min; sh++);
#ifdef LIBRAW_LIBRARY_BUILD
      /* flag checks if outside of loop */
      if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_NONE
         || imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE
         )
        {
          for (bit=30, i=0; i < 16; i++)
            if      (i == imax) pix[i] = max;
            else if (i == imin) pix[i] = min;
            else {
              pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
              if (pix[i] > 0x7ff) pix[i] = 0x7ff;
              bit += 7;
            }
        }
      else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_BASEONLY)
        {
          for (bit=30, i=0; i < 16; i++)
            if      (i == imax) pix[i] = max;
            else if (i == imin) pix[i] = min;
            else pix[i]=0;
        }
      else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTAONLY)
        {
          for (bit=30, i=0; i < 16; i++)
            if      (i == imax) pix[i] = 0;
            else if (i == imin) pix[i] = 0;
            else {
              pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
              if (pix[i] > 0x7ff) pix[i] = 0x7ff;
              bit += 7;
            }
        }
      else if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTAZEROBASE)
        {
          for (bit=30, i=0; i < 16; i++)
            if      (i == imax) pix[i] = 0;
            else if (i == imin) pix[i] = 0;
            else {
              pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh);
              if (pix[i] > 0x7ff) pix[i] = 0x7ff;
              bit += 7;
            }
        }
#else
      /* unaltered dcraw processing */
      for (bit=30, i=0; i < 16; i++)
	if      (i == imax) pix[i] = max;
	else if (i == imin) pix[i] = min;
	else {
	  pix[i] = ((sget2(dp+(bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
	  if (pix[i] > 0x7ff) pix[i] = 0x7ff;
	  bit += 7;
	}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
      if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE)
        {
          for (i=0; i < 16; i++, col+=2)
            {
              /* encode the local quantization slope instead of the value,
                 clipped to [0,10000]; used for posterization detection */
              unsigned slope = pix[i] < 1001? 2 : curve[pix[i]<<1]-curve[(pix[i]<<1)-2];
              unsigned step = 1 << sh;
              RAW(row,col)=curve[pix[i]<<1]>black+imgdata.params.sony_arw2_posterization_thr?
                LIM(((slope*step*1000)/(curve[pix[i]<<1]-black)),0,10000):0;
            }
        }
      else
        {
          for (i=0; i < 16; i++, col+=2)
            RAW(row,col) = curve[pix[i] << 1];
        }
#else
      for (i=0; i < 16; i++, col+=2)
	RAW(row,col) = curve[pix[i] << 1] >> 2;
#endif
      /* step back to interleave the second color of the row pair */
      col -= col & 1 ? 1:31;
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    free (data);
    throw;
  }
  if(imgdata.params.sony_arw2_options == LIBRAW_SONYARW2_DELTATOVALUE)
    maximum=10000;
#endif
  free (data);
}
/* Samsung SRW loader: per-row strip offsets; 16-pixel groups carry a
   direction bit and four 2-bit opcodes that adjust four running code
   lengths.  Values are sign-extended and predicted from either the row
   above (dir) or the previous pair in the row.  A final pass swaps the
   diagonal Bayer neighbors. */
void CLASS samsung_load_raw()
{
  int row, col, c, i, dir, op[4], len[4];
  order = 0x4949;
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    fseek (ifp, strip_offset+row*4, SEEK_SET);
    fseek (ifp, data_offset+get4(), SEEK_SET);
    ph1_bits(-1);
    FORC4 len[c] = row < 2 ? 7:4;
    for (col=0; col < raw_width; col+=16) {
      dir = ph1_bits(1);
      FORC4 op[c] = ph1_bits(2);
      /* opcodes: 3 = reload length, 2 = shrink, 1 = grow, 0 = keep */
      FORC4 switch (op[c]) {
	case 3: len[c] = ph1_bits(4);	break;
	case 2: len[c]--;		break;
	case 1: len[c]++;
      }
      for (c=0; c < 16; c+=2) {
	i = len[((c & 1) << 1) | (c >> 3)];
        /* (signed)<< then >> sign-extends the i-bit difference */
	RAW(row,col+c) = ((signed) ph1_bits(i) << (32-i) >> (32-i)) +
	  (dir ? RAW(row+(~c | -2),col+c) : col ? RAW(row,col+(c | -2)) : 128);
        /* after even positions 0..14, rerun for the odd positions */
	if (c == 14) c = -1;
      }
    }
  }
  for (row=0; row < raw_height-1; row+=2)
    for (col=0; col < raw_width-1; col+=2)
      SWAP (RAW(row,col+1), RAW(row+1,col));
}
/* Samsung (compressed NX variant): a Huffman table is expanded from
   tab[] (high byte = repeat shift, low byte = code), then standard
   lossless-JPEG-style DPCM: the first two columns use vertical
   predictors, the rest horizontal ones, one per column parity. */
void CLASS samsung2_load_raw()
{
  static const ushort tab[14] =
  { 0x304,0x307,0x206,0x205,0x403,0x600,0x709,
    0x80a,0x90b,0xa0c,0xa0d,0x501,0x408,0x402 };
  ushort huff[1026], vpred[2][2] = {{0,0},{0,0}}, hpred[2];
  int i, c, n, row, col, diff;
  huff[0] = 10;
  for (n=i=0; i < 14; i++)
    FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
  getbits(-1);
  for (row=0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < raw_width; col++) {
      diff = ljpeg_diff (huff);
      if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
      else	   hpred[col & 1] += diff;
      RAW(row,col) = hpred[col & 1];
      if (hpred[col & 1] >> tiff_bps) derror();
    }
  }
}
/* Samsung (third SRW scheme): option byte 'opt' selects how magnitude
   ('mag'), prediction mode ('pmode') and the four code lengths are
   updated per 16-pixel group; pixels predict from the previous row
   pair via prow[] or horizontally, and differences are scaled by the
   shared magnitude.  NOTE(review): the char-arithmetic idioms
   ('2'+"204"[i] etc.) are compact lookup tables from upstream dcraw. */
void CLASS samsung3_load_raw()
{
  int opt, init, mag, pmode, row, tab, col, pred, diff, i, c;
  ushort lent[3][2], len[4], *prow[2];
  order = 0x4949;
  fseek (ifp, 9, SEEK_CUR);
  opt = fgetc(ifp);
  init = (get2(),get2());
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* each row's bitstream is 16-byte aligned relative to data_offset */
    fseek (ifp, (data_offset-ftell(ifp)) & 15, SEEK_CUR);
    ph1_bits(-1);
    mag = 0; pmode = 7;
    FORC(6) lent[0][c] = row < 2 ? 7:4;
    prow[ row & 1] = &RAW(row-1,1-((row & 1) << 1));	// green
    prow[~row & 1] = &RAW(row-2,0);			// red and blue
    for (tab=0; tab+15 < raw_width; tab+=16) {
      if (~opt & 4 && !(tab & 63)) {
	i = ph1_bits(2);
	mag = i < 3 ? mag-'2'+"204"[i] : ph1_bits(12);
      }
      if (opt & 2)
	pmode = 7 - 4*ph1_bits(1);
      else if (!ph1_bits(1))
	pmode = ph1_bits(3);
      if (opt & 1 || !ph1_bits(1)) {
	FORC4 len[c] = ph1_bits(2);
	FORC4 {
	  i = ((row & 1) << 1 | (c & 1)) % 3;
	  len[c] = len[c] < 3 ? lent[i][0]-'1'+"120"[len[c]] : ph1_bits(4);
	  lent[i][0] = lent[i][1];
	  lent[i][1] = len[c];
	}
      }
      FORC(16) {
	col = tab + (((c & 7) << 1)^(c >> 3)^(row & 1));
	pred = (pmode == 7 || row < 2)
	     ? (tab ? RAW(row,tab-2+(col & 1)) : init)
	     : (prow[col & 1][col-'4'+"0224468"[pmode]] +
		prow[col & 1][col-'4'+"0244668"[pmode]] + 1) >> 1;
	diff = ph1_bits (i = len[c >> 2]);
	if (diff >> (i-1)) diff -= 1 << i;
	diff = diff * (mag*2+1) + mag;
	RAW(row,col) = pred + diff;
      }
    }
  }
}
#define HOLE(row) ((holes >> (((row) - raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
/* Decode one segment of a SMaL raw file.  Implements an adaptive
   arithmetic/range decoder: three symbol streams (sym[0..2]) share the
   bitstream, each with its own adaptive histogram hist[s]; the three
   symbols are recombined into an 8-bit DPCM difference per pixel.
   pred[] holds two running predictors interleaved by column parity,
   and HOLE rows (sensor gaps) skip two pixels at a time.
   NOTE(review): the range-coder details below follow upstream dcraw
   exactly; the statement order is load-bearing. */
void CLASS smal_decode_segment (unsigned seg[2][2], int holes)
{
  uchar hist[3][13] = {
    { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
    { 7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0 },
    { 3, 3, 0, 0, 63, 47, 31, 15, 0 } };
  int low, high=0xff, carry=0, nbits=8;
  int pix, s, count, bin, next, i, sym[3];
  uchar diff, pred[]={0,0};
  ushort data=0, range=0;
  fseek (ifp, seg[0][1]+1, SEEK_SET);
  getbits(-1);
  for (pix=seg[0][0]; pix < seg[1][0]; pix++) {
    for (s=0; s < 3; s++) {
      /* refill and bit-unstuff: a 0xff byte in the stream carries an
         extra stuffed bit that must be folded back in */
      data = data << nbits | getbits(nbits);
      if (carry < 0)
	carry = (nbits += carry+1) < 1 ? nbits-1 : 0;
      while (--nbits >= 0)
	if ((data >> nbits & 0xff) == 0xff) break;
      if (nbits > 0)
	  data = ((data & ((1 << (nbits-1)) - 1)) << 1) |
	((data + (((data & (1 << (nbits-1)))) << 1)) & ((~0u) << nbits));
      if (nbits >= 0) {
	data += getbits(1);
	carry = nbits - 8;
      }
      /* locate the symbol bin inside the current range */
      count = ((((data-range+1) & 0xffff) << 2) - 1) / (high >> 4);
      for (bin=0; hist[s][bin+5] > count; bin++);
      low = hist[s][bin+5] * (high >> 4) >> 2;
      if (bin) high = hist[s][bin+4] * (high >> 4) >> 2;
      high -= low;
      /* renormalize */
      for (nbits=0; high << nbits < 128; nbits++);
      range = (range+low) << nbits;
      high <<= nbits;
      /* adapt the histogram toward recently seen symbols */
      next = hist[s][1];
      if (++hist[s][2] > hist[s][3]) {
	next = (next+1) & hist[s][0];
	hist[s][3] = (hist[s][next+4] - hist[s][next+5]) >> 2;
	hist[s][2] = 1;
      }
      if (hist[s][hist[s][1]+4] - hist[s][hist[s][1]+5] > 1) {
	if (bin < hist[s][1])
	  for (i=bin; i < hist[s][1]; i++) hist[s][i+5]--;
	else if (next <= bin)
	  for (i=hist[s][1]; i < bin; i++) hist[s][i+5]++;
      }
      hist[s][1] = next;
      sym[s] = bin;
    }
    /* reassemble the 8-bit difference from the three symbol fields */
    diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
    if (sym[0] & 4)
      diff = diff ? -diff : 0x80;
    if (ftell(ifp) + 12 >= seg[1][1])
      diff = 0;
    raw_image[pix] = pred[pix & 1] += diff;
    if (!(pix & 1) && HOLE(pix / raw_width)) pix += 2;
  }
  maximum = 0xff;
}
/* SMaL v6: the whole image is one segment.  The data offset (seg[0][1])
   is read from file position 16; the seek must precede get2(). */
void CLASS smal_v6_load_raw()
{
  unsigned seg[2][2];
  fseek (ifp, 16, SEEK_SET);
  seg[0][0] = 0;
  seg[0][1] = get2();
  seg[1][0] = raw_width * raw_height;
  seg[1][1] = INT_MAX;
  smal_decode_segment (seg, 0);
}
/* Return the mean of the two middle values of a 4-element array,
   i.e. (sum - smallest - largest) / 2. */
int CLASS median4 (int *p)
{
  int lo = p[0], hi = p[0], total = p[0];
  for (int k = 1; k < 4; k++) {
    total += p[k];
    if (p[k] < lo) lo = p[k];
    if (p[k] > hi) hi = p[k];
  }
  return (total - lo - hi) >> 1;
}
/* Fill the SMaL sensor's hole rows (bitmask 'holes', see HOLE macro)
   by median/average interpolation from the surrounding valid pixels:
   odd columns from the four diagonal neighbors, even columns from
   horizontal or plus-shaped neighbors depending on adjacent holes. */
void CLASS fill_holes (int holes)
{
  int row, col, val[4];
  for (row=2; row < height-2; row++) {
    if (!HOLE(row)) continue;
    for (col=1; col < width-1; col+=4) {
      val[0] = RAW(row-1,col-1);
      val[1] = RAW(row-1,col+1);
      val[2] = RAW(row+1,col-1);
      val[3] = RAW(row+1,col+1);
      RAW(row,col) = median4(val);
    }
    for (col=2; col < width-2; col+=4)
      if (HOLE(row-2) || HOLE(row+2))
	RAW(row,col) = (RAW(row,col-2) + RAW(row,col+2)) >> 1;
      else {
	val[0] = RAW(row,col-2);
	val[1] = RAW(row,col+2);
	val[2] = RAW(row-2,col);
	val[3] = RAW(row+2,col);
	RAW(row,col) = median4(val);
      }
  }
}
/* SMaL v9: the file carries a segment table (up to 255 entries read as
   a flat pair array), a hole-row bitmask, and a sentinel end segment.
   Each segment is decoded independently, then hole rows are filled. */
void CLASS smal_v9_load_raw()
{
  unsigned seg[256][2], offset, nseg, holes, i;
  fseek (ifp, 67, SEEK_SET);
  offset = get4();
  nseg = fgetc(ifp);
  fseek (ifp, offset, SEEK_SET);
  /* seg[][2] is filled as a flat array: even i = pixel index,
     odd i = file offset (rebased by data_offset) */
  for (i=0; i < nseg*2; i++)
    seg[0][i] = get4() + data_offset*(i & 1);
  fseek (ifp, 78, SEEK_SET);
  holes = fgetc(ifp);
  fseek (ifp, 88, SEEK_SET);
  seg[nseg][0] = raw_height * raw_width;
  seg[nseg][1] = get4() + data_offset;
  for (i=0; i < nseg; i++)
    smal_decode_segment (seg+i, holes);
  if (holes) fill_holes (holes);
}
/* RED REDCODE (.R3D) loader via JasPer JPEG-2000: decode four
   half-resolution component planes into a (height+2)x(width+2) bordered
   buffer, mirror the borders, apply a high-pass reconstruction to the
   interpolated sites, then map through curve[] into RAW().
   Compiled out entirely when NO_JASPER is defined. */
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
  int c, row, col;
  jas_stream_t *in;
  jas_image_t *jimg;
  jas_matrix_t *jmat;
  jas_seqent_t *data;
  ushort *img, *pix;
  jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
  in = jas_stream_fopen (ifname, "rb");
#else
  in = (jas_stream_t*)ifp->make_jas_stream();
  if(!in)
    throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
  jas_stream_seek (in, data_offset+20, SEEK_SET);
  jimg = jas_image_decode (in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
  if (!jimg) longjmp (failure, 3);
#else
  if(!jimg)
    {
      jas_stream_close (in);
      throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
    }
#endif
  jmat = jas_matrix_create (height/2, width/2);
  merror (jmat, "redcine_load_raw()");
  img = (ushort *) calloc ((height+2), (width+2)*2);
  merror (img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  /* defer the throw so JasPer objects are destroyed exactly once */
  bool fastexitflag = false;
  try {
#endif
  FORC4 {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* scatter each quarter-res component onto its Bayer phase */
    jas_image_readcmpt (jimg, c, 0, 0, width/2, height/2, jmat);
    data = jas_matrix_getref (jmat, 0, 0);
    for (row = c >> 1; row < height; row+=2)
      for (col = c & 1; col < width; col+=2)
	img[(row+1)*(width+2)+col+1] = data[(row/2)*(width/2)+col/2];
  }
  /* mirror top/bottom then left/right borders for the filter below */
  for (col=1; col <= width; col++) {
    img[col] = img[2*(width+2)+col];
    img[(height+1)*(width+2)+col] = img[(height-1)*(width+2)+col];
  }
  for (row=0; row < height+2; row++) {
    img[row*(width+2)] = img[row*(width+2)+2];
    img[(row+1)*(width+2)-1] = img[(row+1)*(width+2)-3];
  }
  for (row=1; row <= height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* sharpen alternate sites against their 4-neighborhood */
    pix = img + row*(width+2) + (col = 1 + (FC(row,1) & 1));
    for (   ; col <= width; col+=2, pix+=2) {
      c = (((pix[0] - 0x800) << 3) +
	pix[-(width+2)] + pix[width+2] + pix[-1] + pix[1]) >> 2;
      pix[0] = LIM(c,0,4095);
    }
  }
  for (row=0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col=0; col < width; col++)
      RAW(row,col) = curve[img[(row+1)*(width+2)+col+1]];
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch (...) {
    fastexitflag=true;
  }
#endif
  free (img);
  jas_matrix_destroy (jmat);
  jas_image_destroy (jimg);
  jas_stream_close (in);
#ifdef LIBRAW_LIBRARY_BUILD
  if(fastexitflag)
    throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
//@end COMMON
/* RESTRICTED code starts here */
/* Recursively build the Foveon Huffman decode tree.  On the initial
   call (code==0) the code table is read from the file into a static
   buffer and the global first_decode/free_decode pool is reset; each
   recursion level consumes one pool node and splits on the next bit.
   NOTE(review): huff[] holds 1024 entries but 'size' is caller-supplied
   (256 and 1024 in this file); callers must not exceed 1024. */
void CLASS foveon_decoder (unsigned size, unsigned code)
{
  static unsigned huff[1024];
  struct decode *cur;
  int i, len;
  if (!code) {
    for (i=0; i < size; i++)
      huff[i] = get4();
    memset (first_decode, 0, sizeof first_decode);
    free_decode = first_decode;
  }
  cur = free_decode++;
  if (free_decode > first_decode+2048) {
    fprintf (stderr,_("%s: decoder table overflow\n"), ifname);
    longjmp (failure, 2);
  }
  if (code)
    for (i=0; i < size; i++)
      if (huff[i] == code) {
	cur->leaf = i;
	return;
      }
  /* top 5 bits of 'code' track the current depth; stop at 26 levels */
  if ((len = code >> 27) > 26) return;
  code = (len+1) << 27 | (code & 0x3ffffff) << 1;
  cur->branch[0] = free_decode;
  foveon_decoder (size, code);
  cur->branch[1] = free_decode;
  foveon_decoder (size, code+1);
}
/* Write the Foveon thumbnail to ofp as a PPM.  bwide > 0 means the
   thumbnail rows are stored uncompressed (just copy them); bwide == 0
   means Huffman-coded per-channel DPCM decoded via the shared tree. */
void CLASS foveon_thumb()
{
  unsigned bwide, row, col, bitbuf=0, bit=1, c, i;
  char *buf;
  struct decode *dindex;
  short pred[3];
  bwide = get4();
  fprintf (ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  if (bwide > 0) {
    if (bwide < thumb_width*3) return;
    buf = (char *) malloc (bwide);
    merror (buf, "foveon_thumb()");
    for (row=0; row < thumb_height; row++) {
      fread  (buf, 1, bwide, ifp);
      fwrite (buf, 3, thumb_width, ofp);
    }
    free (buf);
    return;
  }
  foveon_decoder (256, 0);
  for (row=0; row < thumb_height; row++) {
    memset (pred, 0, sizeof pred);
    /* each row starts on a 32-bit boundary in the stream */
    if (!bit) get4();
    for (bit=col=0; col < thumb_width; col++)
      FORC3 {
	for (dindex=first_decode; dindex->branch[0]; ) {
	  if ((bit = (bit-1) & 31) == 31)
	    for (i=0; i < 4; i++)
	      bitbuf = (bitbuf << 8) + fgetc(ifp);
	  dindex = dindex->branch[bitbuf >> bit & 1];
	}
	pred[c] += dindex->leaf;
	fputc (pred[c], ofp);
      }
  }
}
/* Foveon X3 (SD cameras) loader.  A 1024-entry difference table is
   read first.  load_flags != 0 selects the packed mode (three 10-bit
   indices per 32-bit word); otherwise each channel is Huffman-decoded
   through the shared tree.  All three layers accumulate DPCM sums. */
void CLASS foveon_sd_load_raw()
{
  struct decode *dindex;
  short diff[1024];
  unsigned bitbuf=0;
  int pred[3], row, col, bit=-1, c, i;
  read_shorts ((ushort *) diff, 1024);
  if (!load_flags) foveon_decoder (1024, 0);
  for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    memset (pred, 0, sizeof pred);
    /* older models (per model digits) pad each row to 32 bits */
    if (!bit && !load_flags && atoi(model+2) < 14) get4();
    for (col=bit=0; col < width; col++) {
      if (load_flags) {
	bitbuf = get4();
	FORC3 pred[2-c] += diff[bitbuf >> c*10 & 0x3ff];
      }
      else FORC3 {
	for (dindex=first_decode; dindex->branch[0]; ) {
	  if ((bit = (bit-1) & 31) == 31)
	    for (i=0; i < 4; i++)
	      bitbuf = (bitbuf << 8) + fgetc(ifp);
	  dindex = dindex->branch[bitbuf >> bit & 1];
	}
	pred[c] += diff[dindex->leaf];
	if (pred[c] >> 16 && ~pred[c] >> 16) derror();
      }
      FORC3 image[row*width+col][c] = pred[c];
    }
  }
}
/* Read a 13-entry Foveon Huffman spec (length byte + code byte each)
   from the file and expand it into the huff[] lookup table used by
   ljpeg_diff: entry = (code length << 8) | symbol value.
   NOTE(review): writes up to huff[code + (256>>clen)]; callers pass
   tables of differing sizes (512 and 258) -- bounds rely on the data. */
void CLASS foveon_huff (ushort *huff)
{
  int i, j, clen, code;
  huff[0] = 8;
  for (i=0; i < 13; i++) {
    clen = getc(ifp);
    code = getc(ifp);
    for (j=0; j < 256 >> clen; )
      huff[code+ ++j] = clen << 8 | i;
  }
  get2();
}
/* Foveon DP-series loader: three separately-offset planes (roff[],
   16-byte aligned), each decoded as lossless-JPEG-style DPCM with
   vertical predictors for the first two columns. */
void CLASS foveon_dp_load_raw()
{
  unsigned c, roff[4], row, col, diff;
  ushort huff[512], vpred[2][2], hpred[2];
  fseek (ifp, 8, SEEK_CUR);
  foveon_huff (huff);
  roff[0] = 48;
  FORC3 roff[c+1] = -(-(roff[c] + get4()) & -16);
  FORC3 {
    fseek (ifp, data_offset+roff[c], SEEK_SET);
    getbits(-1);
    vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512;
    for (row=0; row < height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col=0; col < width; col++) {
	diff = ljpeg_diff(huff);
	if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
	else hpred[col & 1] += diff;
	image[row*width+col][c] = hpred[col & 1];
      }
    }
  }
}
/* Load the Foveon CAMF calibration block into meta_data.  Type 2 is
   stored XOR-obscured with a multiplicative PRNG keyed by wide/high;
   type 4 is Huffman DPCM-compressed 12-bit data repacked 2 values per
   3 bytes.  Other types are only reported (verbose builds). */
void CLASS foveon_load_camf()
{
  unsigned type, wide, high, i, j, row, col, diff;
  ushort huff[258], vpred[2][2] = {{512,512},{512,512}}, hpred[2];
  fseek (ifp, meta_offset, SEEK_SET);
  type = get4();  get4();  get4();
  wide = get4();
  high = get4();
  if (type == 2) {
    fread (meta_data, 1, meta_length, ifp);
    for (i=0; i < meta_length; i++) {
      high = (high * 1597 + 51749) % 244944;
      wide = high * (INT64) 301593171 >> 24;
      meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17;
    }
  } else if (type == 4) {
    free (meta_data);
    meta_data = (char *) malloc (meta_length = wide*high*3/2);
    merror (meta_data, "foveon_load_camf()");
    foveon_huff (huff);
    get4();
    getbits(-1);
    for (j=row=0; row < high; row++) {
      for (col=0; col < wide; col++) {
	diff = ljpeg_diff(huff);
	if (col < 2) hpred[col] = vpred[row & 1][col] += diff;
	else         hpred[col & 1] += diff;
	/* pack two 12-bit values into three bytes */
	if (col & 1) {
	  meta_data[j++] = hpred[0] >> 4;
	  meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8;
	  meta_data[j++] = hpred[1];
	}
      }
    }
  }
#ifdef DCRAW_VERBOSE
  else
    fprintf (stderr,_("%s has unknown CAMF type %d.\n"), ifname, type);
#endif
}
/* Look up a named parameter inside a named CAMF "CMbP" block and
   return a pointer to its value string within meta_data, or NULL.
   NOTE(review): the idx loop trusts sget4(pos+8) to be nonzero and
   the offsets to stay inside meta_data -- malformed CAMF data could
   loop or read out of bounds (matches upstream dcraw). */
const char * CLASS foveon_camf_param (const char *block, const char *param)
{
  unsigned idx, num;
  char *pos, *cp, *dp;
  for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
    pos = meta_data + idx;
    if (strncmp (pos, "CMb", 3)) break;
    if (pos[3] != 'P') continue;
    if (strcmp (block, pos+sget4(pos+12))) continue;
    cp = pos + sget4(pos+16);
    num = sget4(cp);
    dp = pos + sget4(cp+4);
    while (num--) {
      cp += 8;
      if (!strcmp (param, dp+sget4(cp)))
	return dp+sget4(cp+4);
    }
  }
  return 0;
}
/* Find a named CAMF "CMbM" matrix, fill dim[0..2] with its dimensions,
   and return a freshly malloc'd array of its elements as unsigned
   (type 6 entries are stored as 16-bit).  Returns NULL when the matrix
   is missing, has more than 3 dimensions, or would exceed meta_length.
   Caller owns (and must free) the returned buffer. */
void * CLASS foveon_camf_matrix (unsigned dim[3], const char *name)
{
  unsigned i, idx, type, ndim, size, *mat;
  char *pos, *cp, *dp;
  double dsize;
  for (idx=0; idx < meta_length; idx += sget4(pos+8)) {
    pos = meta_data + idx;
    if (strncmp (pos, "CMb", 3)) break;
    if (pos[3] != 'M') continue;
    if (strcmp (name, pos+sget4(pos+12))) continue;
    dim[0] = dim[1] = dim[2] = 1;
    cp = pos + sget4(pos+16);
    type = sget4(cp);
    if ((ndim = sget4(cp+4)) > 3) break;
    dp = pos + sget4(cp+8);
    /* dimensions are listed outermost-first; store reversed */
    for (i=ndim; i--; ) {
      cp += 12;
      dim[i] = sget4(cp);
    }
    /* guard against overflow-sized matrices before allocating */
    if ((dsize = (double) dim[0]*dim[1]*dim[2]) > meta_length/4) break;
    mat = (unsigned *) malloc ((size = dsize) * 4);
    merror (mat, "foveon_camf_matrix()");
    for (i=0; i < size; i++)
      if (type && type != 6)
	mat[i] = sget4(dp + i*4);
      else
	mat[i] = sget4(dp + i*2) & 0xffff;
    return mat;
  }
#ifdef DCRAW_VERBOSE
  fprintf (stderr,_("%s: \"%s\" matrix not found!\n"), ifname, name);
#endif
  return 0;
}
/* Copy 'size' 32-bit words of the named CAMF matrix into ptr.
   Returns 1 on success, 0 when name is NULL or the matrix is absent.
   The temporary buffer from foveon_camf_matrix is always released. */
int CLASS foveon_fixed (void *ptr, int size, const char *name)
{
  unsigned dim[3];
  if (!name)
    return 0;
  void *src = foveon_camf_matrix (dim, name);
  if (!src)
    return 0;
  memcpy (ptr, src, size*4);
  free (src);
  return 1;
}
/* Average pix[range[0]*4 .. range[1]*4] (stride 4), with each sample
   column-filtered against its left neighbor by cfilt.  A two-sample
   range returns the plain mean; otherwise the minimum and maximum are
   excluded from the average (trimmed mean). */
float CLASS foveon_avg (short *pix, int range[2], float cfilt)
{
  float lo = FLT_MAX, hi = -FLT_MAX, total = 0;
  for (int i = range[0]; i <= range[1]; i++) {
    float v = pix[i*4] + (pix[i*4] - pix[(i-1)*4]) * cfilt;
    total += v;
    if (v < lo) lo = v;
    if (v > hi) hi = v;
  }
  if (range[1] - range[0] == 1)
    return total / 2;
  return (total - lo - hi) / (range[1] - range[0] - 1);
}
/* Allocate and build a raised-cosine/tanh soft-saturation curve of
   4*pi*max/filt entries; curve[0] stores the table size and callers
   index values from curve[1] on.  Caller frees the result. */
short * CLASS foveon_make_curve (double max, double mul, double filt)
{
  short *curve;
  unsigned i, size;
  double x;
  if (!filt) filt = 0.8;
  size = 4*M_PI*max / filt;
  if (size == UINT_MAX) size--;
  curve = (short *) calloc (size+1, sizeof *curve);
  merror (curve, "foveon_make_curve()");
  curve[0] = size;
  for (i=0; i < size; i++) {
    x = i*filt/max/4;
    curve[i+1] = (cos(x)+1)/2 * tanh(i*filt/mul) * mul + 0.5;
  }
  return curve;
}
/* Build one curve per channel: each channel's gain is dq[c]/div[c],
   and all three curves share the largest gain as their common 'max'. */
void CLASS foveon_make_curves
	(short **curvep, float dq[3], float div[3], float filt)
{
  double scale[3], peak = 0;
  int c;
  for (c=0; c < 3; c++) {
    scale[c] = dq[c] / div[c];
    if (peak < scale[c]) peak = scale[c];
  }
  for (c=0; c < 3; c++)
    curvep[c] = foveon_make_curve (peak, scale[c], filt);
}
/* Map a signed value through a foveon_make_curve table (size in
   curve[0], entries from curve[1]); odd symmetry, zero outside range. */
int CLASS foveon_apply_curve (short *curve, int i)
{
  if (abs(i) < curve[0]) {
    if (i < 0)
      return -curve[1-i];
    return curve[1+i];
  }
  return 0;
}
#define image ((short (*)[4]) image)
/* Full Foveon X3 reconstruction pipeline (restricted dcraw code).
   Stages, in order: load CAMF calibration; derive the white-balance /
   color transform 'trans' from camera matrices; build tone curves;
   estimate and smooth per-row black levels from the dark-shield
   columns; apply spatial gain, polynomial correction and black
   subtraction per pixel; patch bad pixels from CAMF "BadPixels";
   sharpen red via 5x5 Gaussian averages; relinearize highlights;
   smooth chroma (twice, different kernels); convert to the output
   colorspace; build a 1/4-scale smoothed image and pull pixel chroma
   toward it; finally crop to the active area.
   NOTE(review): statement order and the rolling smrow[] buffers are
   load-bearing throughout -- documented, not restructured. */
void CLASS foveon_interpolate()
{
  static const short hood[] = { -1,-1, -1,0, -1,1, 0,-1, 0,1, 1,-1, 1,0, 1,1 };
  short *pix, prev[3], *curve[8], (*shrink)[3];
  float cfilt=0, ddft[3][3][2], ppm[3][3][3];
  float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3];
  float chroma_dq[3], color_dq[3], diag[3][3], div[3];
  float (*black)[3], (*sgain)[3], (*sgrow)[3];
  float fsum[3], val, frow, num;
  int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit;
  int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3];
  int work[3][3], smlast, smred, smred_p=0, dev[3];
  int satlev[3], keep[4], active[4];
  unsigned dim[3], *badpix;
  double dsum=0, trsum[3];
  char str[128];
  const char* cp;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr,_("Foveon interpolation...\n"));
#endif
  /* --- load calibration blocks from CAMF --- */
  foveon_load_camf();
  foveon_fixed (dscr, 4, "DarkShieldColRange");
  foveon_fixed (ppm[0][0], 27, "PostPolyMatrix");
  foveon_fixed (satlev, 3, "SaturationLevel");
  foveon_fixed (keep, 4, "KeepImageArea");
  foveon_fixed (active, 4, "ActiveImageArea");
  foveon_fixed (chroma_dq, 3, "ChromaDQ");
  foveon_fixed (color_dq, 3,
	foveon_camf_param ("IncludeBlocks", "ColorDQ") ?
		"ColorDQ" : "ColorDQCamRGB");
  if (foveon_camf_param ("IncludeBlocks", "ColumnFilter"))
		 foveon_fixed (&cfilt, 1, "ColumnFilter");
  /* dark drift: from CAMF if present, else measured from the shield rows */
  memset (ddft, 0, sizeof ddft);
  if (!foveon_camf_param ("IncludeBlocks", "DarkDrift")
	 || !foveon_fixed (ddft[1][0], 12, "DarkDrift"))
    for (i=0; i < 2; i++) {
      foveon_fixed (dstb, 4, i ? "DarkShieldBottom":"DarkShieldTop");
      for (row = dstb[1]; row <= dstb[3]; row++)
	for (col = dstb[0]; col <= dstb[2]; col++)
	  FORC3 ddft[i+1][c][1] += (short) image[row*width+col][c];
      FORC3 ddft[i+1][c][1] /= (dstb[3]-dstb[1]+1) * (dstb[2]-dstb[0]+1);
    }
  if (!(cp = foveon_camf_param ("WhiteBalanceIlluminants", model2)))
  { 
#ifdef DCRAW_VERBOSE
      fprintf (stderr,_("%s: Invalid white balance \"%s\"\n"), ifname, model2);
#endif
    return; }
  foveon_fixed (cam_xyz, 9, cp);
  foveon_fixed (correct, 9,
	foveon_camf_param ("WhiteBalanceCorrections", model2));
  /* --- build the camera->output color transform --- */
  memset (last, 0, sizeof last);
  for (i=0; i < 3; i++)
    for (j=0; j < 3; j++)
      FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j];
  /* cofactor matrix of 'last' -> per-channel normalizers 'div' */
  #define LAST(x,y) last[(i+x)%3][(c+y)%3]
  for (i=0; i < 3; i++)
    FORC3 diag[c][i] = LAST(1,1)*LAST(2,2) - LAST(1,2)*LAST(2,1);
  #undef LAST
  FORC3 div[c] = diag[c][0]*0.3127 + diag[c][1]*0.329 + diag[c][2]*0.3583;
  sprintf (str, "%sRGBNeutral", model2);
  if (foveon_camf_param ("IncludeBlocks", str))
    foveon_fixed (div, 3, str);
  num = 0;
  FORC3 if (num < div[c]) num = div[c];
  FORC3 div[c] /= num;
  memset (trans, 0, sizeof trans);
  for (i=0; i < 3; i++)
    for (j=0; j < 3; j++)
      FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j];
  /* normalize rows to equal luminance contribution */
  FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2];
  dsum = (6*trsum[0] + 11*trsum[1] + 3*trsum[2]) / 20;
  for (i=0; i < 3; i++)
    FORC3 last[i][c] = trans[i][c] * dsum / trsum[i];
  memset (trans, 0, sizeof trans);
  for (i=0; i < 3; i++)
    for (j=0; j < 3; j++)
      FORC3 trans[i][j] += (i==c ? 32 : -1) * last[c][j] / 30;
  /* --- tone/chroma curves --- */
  foveon_make_curves (curve, color_dq, div, cfilt);
  FORC3 chroma_dq[c] /= 3;
  foveon_make_curves (curve+3, chroma_dq, div, cfilt);
  FORC3 dsum += chroma_dq[c] / div[c];
  curve[6] = foveon_make_curve (dsum, dsum, cfilt);
  curve[7] = foveon_make_curve (dsum*2, dsum*2, cfilt);
  sgain = (float (*)[3]) foveon_camf_matrix (dim, "SpatialGain");
  if (!sgain) return;
  sgrow = (float (*)[3]) calloc (dim[1], sizeof *sgrow);
  sgx = (width + dim[1]-2) / (dim[1]-1);
  /* --- per-row black level estimate from the dark-shield columns --- */
  black = (float (*)[3]) calloc (height, sizeof *black);
  for (row=0; row < height; row++) {
    for (i=0; i < 6; i++)
      ddft[0][0][i] = ddft[1][0][i] +
	row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]);
    FORC3 black[row][c] =
	( foveon_avg (image[row*width]+c, dscr[0], cfilt) +
	  foveon_avg (image[row*width]+c, dscr[1], cfilt) * 3
	  - ddft[0][c][0] ) / 4 - ddft[0][c][1];
  }
  memcpy (black, black+8, sizeof *black*8);
  memcpy (black+height-11, black+height-22, 11*sizeof *black);
  memcpy (last, black, sizeof last);
  /* median-filter the black rows (3-row sliding window via 'last') */
  for (row=1; row < height-1; row++) {
    FORC3 if (last[1][c] > last[0][c]) {
	if (last[1][c] > last[2][c])
	  black[row][c] = (last[0][c] > last[2][c]) ? last[0][c]:last[2][c];
      } else
	if (last[1][c] < last[2][c])
	  black[row][c] = (last[0][c] < last[2][c]) ? last[0][c]:last[2][c];
    memmove (last, last+1, 2*sizeof last[0]);
    memcpy (last[2], black[row+1], sizeof last[2]);
  }
  FORC3 black[row][c] = (last[0][c] + last[1][c])/2;
  FORC3 black[0][c] = (black[1][c] + black[3][c])/2;
  /* exponential smoothing down then up the rows */
  val = 1 - exp(-1/24.0);
  memcpy (fsum, black, sizeof fsum);
  for (row=1; row < height; row++)
    FORC3 fsum[c] += black[row][c] =
	(black[row][c] - black[row-1][c])*val + black[row-1][c];
  memcpy (last[0], black[height-1], sizeof last[0]);
  FORC3 fsum[c] /= height;
  for (row = height; row--; )
    FORC3 last[0][c] = black[row][c] =
	(black[row][c] - fsum[c] - last[0][c])*val + last[0][c];
  memset (total, 0, sizeof total);
  for (row=2; row < height; row+=4)
    for (col=2; col < width; col+=4) {
      FORC3 total[c] += (short) image[row*width+col][c];
      total[3]++;
    }
  for (row=0; row < height; row++)
    FORC3 black[row][c] += fsum[c]/2 + total[c]/(total[3]*100.0);
  /* --- main per-pixel pass: column filter, dark drift, spatial gain,
     cubic post-poly correction, black subtraction --- */
  for (row=0; row < height; row++) {
    for (i=0; i < 6; i++)
      ddft[0][0][i] = ddft[1][0][i] +
	row / (height-1.0) * (ddft[2][0][i] - ddft[1][0][i]);
    pix = image[row*width];
    memcpy (prev, pix, sizeof prev);
    frow = row / (height-1.0) * (dim[2]-1);
    if ((irow = frow) == dim[2]-1) irow--;
    frow -= irow;
    /* interpolate the spatial gain grid to this row */
    for (i=0; i < dim[1]; i++)
      FORC3 sgrow[i][c] = sgain[ irow   *dim[1]+i][c] * (1-frow) +
			  sgain[(irow+1)*dim[1]+i][c] *    frow;
    for (col=0; col < width; col++) {
      FORC3 {
	diff = pix[c] - prev[c];
	prev[c] = pix[c];
	ipix[c] = pix[c] + floor ((diff + (diff*diff >> 14)) * cfilt
		- ddft[0][c][1] - ddft[0][c][0] * ((float) col/width - 0.5)
		- black[row][c] );
      }
      FORC3 {
	work[0][c] = ipix[c] * ipix[c] >> 14;
	work[2][c] = ipix[c] * work[0][c] >> 14;
	work[1][2-c] = ipix[(c+1) % 3] * ipix[(c+2) % 3] >> 14;
      }
      FORC3 {
	for (val=i=0; i < 3; i++)
	  for (  j=0; j < 3; j++)
	    val += ppm[c][i][j] * work[i][j];
	ipix[c] = floor ((ipix[c] + floor(val)) *
		( sgrow[col/sgx  ][c] * (sgx - col%sgx) +
		  sgrow[col/sgx+1][c] * (col%sgx) ) / sgx / div[c]);
	if (ipix[c] > 32000) ipix[c] = 32000;
	pix[c] = ipix[c];
      }
      pix += 4;
    }
  }
  free (black);
  free (sgrow);
  free (sgain);
  /* --- bad pixel replacement from the CAMF BadPixels list --- */
  if ((badpix = (unsigned int *) foveon_camf_matrix (dim, "BadPixels"))) {
    for (i=0; i < dim[0]; i++) {
      col = (badpix[i] >> 8 & 0xfff) - keep[0];
      row = (badpix[i] >> 20       ) - keep[1];
      if ((unsigned)(row-1) > height-3 || (unsigned)(col-1) > width-3)
	continue;
      memset (fsum, 0, sizeof fsum);
      /* average the neighbors flagged valid in the low 8 bits */
      for (sum=j=0; j < 8; j++)
	if (badpix[i] & (1 << j)) {
	  FORC3 fsum[c] += (short)
		image[(row+hood[j*2])*width+col+hood[j*2+1]][c];
	  sum++;
	}
      if (sum) FORC3 image[row*width+col][c] = fsum[c]/sum;
    }
    free (badpix);
  }
  /* Array for 5x5 Gaussian averaging of red values */
  smrow[6] = (int (*)[3]) calloc (width*5, sizeof **smrow);
  merror (smrow[6], "foveon_interpolate()");
  for (i=0; i < 5; i++)
    smrow[i] = smrow[6] + i*width;
  /* Sharpen the reds against these Gaussian averages */
  for (smlast=-1, row=2; row < height-2; row++) {
    while (smlast < row+2) {
      for (i=0; i < 6; i++)
	smrow[(i+5) % 6] = smrow[i];
      pix = image[++smlast*width+2];
      for (col=2; col < width-2; col++) {
	smrow[4][col][0] =
	  (pix[0]*6 + (pix[-4]+pix[4])*4 + pix[-8]+pix[8] + 8) >> 4;
	pix += 4;
      }
    }
    pix = image[row*width+2];
    for (col=2; col < width-2; col++) {
      smred = ( 6 *  smrow[2][col][0]
	      + 4 * (smrow[1][col][0] + smrow[3][col][0])
	      +      smrow[0][col][0] + smrow[4][col][0] + 8 ) >> 4;
      if (col == 2)
	smred_p = smred;
      i = pix[0] + ((pix[0] - ((smred*7 + smred_p) >> 3)) >> 3);
      if (i > 32000) i = 32000;
      pix[0] = i;
      smred_p = smred;
      pix += 4;
    }
  }
  /* Adjust the brighter pixels for better linearity */
  min = 0xffff;
  FORC3 {
    i = satlev[c] / div[c];
    if (min > i) min = i;
  }
  limit = min * 9 >> 4;
  for (pix=image[0]; pix < image[height*width]; pix+=4) {
    if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit)
      continue;
    min = max = pix[0];
    for (c=1; c < 3; c++) {
      if (min > pix[c]) min = pix[c];
      if (max < pix[c]) max = pix[c];
    }
    if (min >= limit*2) {
      pix[0] = pix[1] = pix[2] = max;
    } else {
      i = 0x4000 - ((min - limit) << 14) / limit;
      i = 0x4000 - (i*i >> 14);
      i = i*i >> 14;
      FORC3 pix[c] += (max - pix[c]) * i >> 14;
    }
  }
/*
   Because photons that miss one detector often hit another,
   the sum R+G+B is much less noisy than the individual colors.
   So smooth the hues without smoothing the total.
 */
  for (smlast=-1, row=2; row < height-2; row++) {
    while (smlast < row+2) {
      for (i=0; i < 6; i++)
	smrow[(i+5) % 6] = smrow[i];
      pix = image[++smlast*width+2];
      for (col=2; col < width-2; col++) {
	FORC3 smrow[4][col][c] = (pix[c-4]+2*pix[c]+pix[c+4]+2) >> 2;
	pix += 4;
      }
    }
    pix = image[row*width+2];
    for (col=2; col < width-2; col++) {
      FORC3 dev[c] = -foveon_apply_curve (curve[7], pix[c] -
	((smrow[1][col][c] + 2*smrow[2][col][c] + smrow[3][col][c]) >> 2));
      sum = (dev[0] + dev[1] + dev[2]) >> 3;
      FORC3 pix[c] += dev[c] - sum;
      pix += 4;
    }
  }
  for (smlast=-1, row=2; row < height-2; row++) {
    while (smlast < row+2) {
      for (i=0; i < 6; i++)
	smrow[(i+5) % 6] = smrow[i];
      pix = image[++smlast*width+2];
      for (col=2; col < width-2; col++) {
	FORC3 smrow[4][col][c] =
		(pix[c-8]+pix[c-4]+pix[c]+pix[c+4]+pix[c+8]+2) >> 2;
	pix += 4;
      }
    }
    pix = image[row*width+2];
    for (col=2; col < width-2; col++) {
      for (total[3]=375, sum=60, c=0; c < 3; c++) {
	for (total[c]=i=0; i < 5; i++)
	  total[c] += smrow[i][col][c];
	total[3] += total[c];
	sum += pix[c];
      }
      if (sum < 0) sum = 0;
      j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174;
      FORC3 pix[c] += foveon_apply_curve (curve[6],
		((j*total[c] + 0x8000) >> 16) - pix[c]);
      pix += 4;
    }
  }
  /* Transform the image to a different colorspace */
  for (pix=image[0]; pix < image[height*width]; pix+=4) {
    FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]);
    sum = (pix[0]+pix[1]+pix[1]+pix[2]) >> 2;
    FORC3 pix[c] -= foveon_apply_curve (curve[c], pix[c]-sum);
    FORC3 {
      for (dsum=i=0; i < 3; i++)
	dsum += trans[c][i] * pix[i];
      if (dsum < 0)  dsum = 0;
      if (dsum > 24000) dsum = 24000;
      ipix[c] = dsum + 0.5;
    }
    FORC3 pix[c] = ipix[c];
  }
  /* Smooth the image bottom-to-top and save at 1/4 scale */
  shrink = (short (*)[3]) calloc ((height/4), (width/4)*sizeof *shrink);
  merror (shrink, "foveon_interpolate()");
  for (row = height/4; row--; )
    for (col=0; col < width/4; col++) {
      ipix[0] = ipix[1] = ipix[2] = 0;
      for (i=0; i < 4; i++)
	for (j=0; j < 4; j++)
	  FORC3 ipix[c] += image[(row*4+i)*width+col*4+j][c];
      FORC3
	if (row+2 > height/4)
	  shrink[row*(width/4)+col][c] = ipix[c] >> 4;
	else
	  shrink[row*(width/4)+col][c] =
	    (shrink[(row+1)*(width/4)+col][c]*1840 + ipix[c]*141 + 2048) >> 12;
    }
  /* From the 1/4-scale image, smooth right-to-left */
  for (row=0; row < (height & ~3); row++) {
    ipix[0] = ipix[1] = ipix[2] = 0;
    if ((row & 3) == 0)
      for (col = width & ~3 ; col--; )
	FORC3 smrow[0][col][c] = ipix[c] =
	  (shrink[(row/4)*(width/4)+col/4][c]*1485 + ipix[c]*6707 + 4096) >> 13;
    /* Then smooth left-to-right */
    ipix[0] = ipix[1] = ipix[2] = 0;
    for (col=0; col < (width & ~3); col++)
      FORC3 smrow[1][col][c] = ipix[c] =
	(smrow[0][col][c]*1485 + ipix[c]*6707 + 4096) >> 13;
    /* Smooth top-to-bottom */
    if (row == 0)
      memcpy (smrow[2], smrow[1], sizeof **smrow * width);
    else
      for (col=0; col < (width & ~3); col++)
	FORC3 smrow[2][col][c] =
	  (smrow[2][col][c]*6707 + smrow[1][col][c]*1485 + 4096) >> 13;
    /* Adjust the chroma toward the smooth values */
    for (col=0; col < (width & ~3); col++) {
      for (i=j=30, c=0; c < 3; c++) {
	i += smrow[2][col][c];
	j += image[row*width+col][c];
      }
      j = (j << 16) / i;
      for (sum=c=0; c < 3; c++) {
	ipix[c] = foveon_apply_curve (curve[c+3],
	  ((smrow[2][col][c] * j + 0x8000) >> 16) - image[row*width+col][c]);
	sum += ipix[c];
      }
      sum >>= 3;
      FORC3 {
	i = image[row*width+col][c] + ipix[c] - sum;
	if (i < 0) i = 0;
	image[row*width+col][c] = i;
      }
    }
  }
  free (shrink);
  free (smrow[6]);
  for (i=0; i < 8; i++)
    free (curve[i]);
  /* Trim off the black border */
  active[1] -= keep[1];
  active[3] -= 2;
  i = active[2] - active[0];
  for (row=0; row < active[3]-active[1]; row++)
    memcpy (image[row*i], image[(row+active[1])*width+active[0]],
	 i * sizeof *image);
  width = i;
  height = row;
}
#undef image
/* RESTRICTED code ends here */
//@out COMMON
/* Crop away the masked (optically black) border of the raw frame and
   estimate per-channel black levels from the masked pixels.
   In the non-library build this also copies the visible area from the
   raw buffer into the output (with special handling for Fuji's rotated
   Super-CCD layout) and applies Phase One corrections first.
   Results: mblack[0..3] = per-color sums, mblack[4..7] = counts; these
   feed cblack[]/black.  In LibRaw builds mblack aliases
   imgdata.color.black_stat (see the #define below). */
void CLASS crop_masked_pixels()
{
int row, col;
unsigned
#ifndef LIBRAW_LIBRARY_BUILD
r, raw_pitch = raw_width*2,
c, m, mblack[8], zero, val;
#else
c, m, zero, val;
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
if (load_raw == &CLASS phase_one_load_raw ||
load_raw == &CLASS phase_one_load_raw_c)
phase_one_correct();
if (fuji_width) {
/* Fuji Super-CCD: diagonally oriented sensor; map raw (row,col)
   onto rotated output coordinates (r,c). */
for (row=0; row < raw_height-top_margin*2; row++) {
for (col=0; col < fuji_width << !fuji_layout; col++) {
if (fuji_layout) {
r = fuji_width - 1 - col + (row >> 1);
c = col + ((row+1) >> 1);
} else {
r = fuji_width - 1 + row - (col >> 1);
c = row + ((col+1) >> 1);
}
if (r < height && c < width)
BAYER(r,c) = RAW(row+top_margin,col+left_margin);
}
}
} else {
/* Ordinary sensors: straight copy of the visible window. */
for (row=0; row < height; row++)
for (col=0; col < width; col++)
BAYER2(row,col) = RAW(row+top_margin,col+left_margin);
}
#endif
/* mask[] rectangles describe the masked border areas.  If the format
   loader already filled them in, skip the per-format defaults. */
if (mask[0][3] > 0) goto mask_set;
if (load_raw == &CLASS canon_load_raw ||
load_raw == &CLASS lossless_jpeg_load_raw) {
/* Canon: shrink the usable strips by two columns on each side. */
mask[0][1] = mask[1][1] += 2;
mask[0][3] -= 2;
goto sides;
}
if (load_raw == &CLASS canon_600_load_raw ||
load_raw == &CLASS sony_load_raw ||
(load_raw == &CLASS eight_bit_load_raw && strncmp(model,"DC2",3)) ||
load_raw == &CLASS kodak_262_load_raw ||
(load_raw == &CLASS packed_load_raw && (load_flags & 32))) {
sides:
/* Default: use the left and right margin strips as masked areas. */
mask[0][0] = mask[1][0] = top_margin;
mask[0][2] = mask[1][2] = top_margin+height;
mask[0][3] += left_margin;
mask[1][1] += left_margin+width;
mask[1][3] += raw_width;
}
if (load_raw == &CLASS nokia_load_raw) {
mask[0][2] = top_margin;
mask[0][3] = width;
}
mask_set:
/* Accumulate per-CFA-color sums and sample counts over every masked
   rectangle; also count how many samples are exactly zero. */
memset (mblack, 0, sizeof mblack);
for (zero=m=0; m < 8; m++)
for (row=MAX(mask[m][0],0); row < MIN(mask[m][2],raw_height); row++)
for (col=MAX(mask[m][1],0); col < MIN(mask[m][3],raw_width); col++) {
c = FC(row-top_margin,col-left_margin);
mblack[c] += val = raw_image[(row)*raw_pitch/2+(col)];
mblack[4+c]++;
zero += !val;
}
if (load_raw == &CLASS canon_600_load_raw && width < raw_width) {
/* Canon 600: single global black level (minus a small bias). */
black = (mblack[0]+mblack[1]+mblack[2]+mblack[3]) /
(mblack[4]+mblack[5]+mblack[6]+mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
canon_600_correct();
#endif
} else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7]) {
/* Per-channel black levels, only if every channel has samples and
   the masked area is not dominated by zero-valued pixels. */
FORC4 cblack[c] = mblack[c] / mblack[4+c];
cblack[4] = cblack[5] = cblack[6] = 0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
/* Replace every zero-valued CFA pixel with the average of the nonzero
   same-color pixels inside the surrounding 5x5 neighborhood.  Pixels
   with no usable neighbors are left untouched. */
void CLASS remove_zeroes()
{
  unsigned y, x, yy, xx, sum, cnt;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
  for (y=0; y < height; y++) {
    for (x=0; x < width; x++) {
      if (BAYER(y,x) != 0)
        continue;
      sum = cnt = 0;
      /* y-2 / x-2 wrap around for y,x < 2 (unsigned); the bounds
         test below rejects those out-of-range coordinates. */
      for (yy = y-2; yy <= y+2; yy++)
        for (xx = x-2; xx <= x+2; xx++) {
          if (yy >= height || xx >= width) continue;
          if (FC(yy,xx) != FC(y,x)) continue;
          if (!BAYER(yy,xx)) continue;
          sum += BAYER(yy,xx);
          cnt++;
        }
      if (cnt)
        BAYER(y,x) = sum/cnt;
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
//@end COMMON
/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw.h"
#include "libraw_defines.h"
#include "var_defines.h"
@end FILEIO */
// @out FILEIO
/*
Search from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
/* Read a ".badpixels" map (the explicit file cfname, or one found by
   searching from the current directory up toward the root) and repair
   each listed pixel by averaging nearby same-color pixels.
   File format: one "col row timestamp" triple per line; '#' begins a
   comment.  Entries newer than the image timestamp are skipped.
   Requires a CFA image (returns immediately when !filters). */
void CLASS bad_pixels (const char *cfname)
{
FILE *fp=NULL;
#ifndef LIBRAW_LIBRARY_BUILD
char *fname, *cp, line[128];
int len, time, row, col, r, c, rad, tot, n, fixed=0;
#else
char *cp, line[128];
int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
int fixed = 0;
#endif
#endif
if (!filters) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,0,2);
#endif
if (cfname)
fp = fopen (cfname, "r");
// @end FILEIO
else {
/* No explicit file: walk from the CWD toward the root, trying
   "<dir>/.badpixels" at each level.  NOTE(review): this branch uses
   len/fname/fixed, declared only in the non-library build; the
   @out/@end FILEIO markers suggest it is stripped from library
   builds by the source-splitting step -- confirm. */
for (len=32 ; ; len *= 2) {
fname = (char *) malloc (len);
if (!fname) return;
if (getcwd (fname, len-16)) break;      /* grow until getcwd fits */
free (fname);
if (errno != ERANGE) return;
}
#if defined(WIN32) || defined(DJGPP)
/* Strip the drive letter and normalize backslashes to slashes. */
if (fname[1] == ':')
memmove (fname, fname+2, len-2);
for (cp=fname; *cp; cp++)
if (*cp == '\\') *cp = '/';
#endif
cp = fname + strlen(fname);
if (cp[-1] == '/') cp--;
while (*fname == '/') {
strcpy (cp, "/.badpixels");
if ((fp = fopen (fname, "r"))) break;
if (cp == fname) break;
while (*--cp != '/');                   /* pop one path component */
}
free (fname);
}
// @out FILEIO
if (!fp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
return;
}
while (fgets (line, 128, fp)) {
cp = strchr (line, '#');
if (cp) *cp = 0;
if (sscanf (line, "%d %d %d", &col, &row, &time) != 3) continue;
if ((unsigned) col >= width || (unsigned) row >= height) continue;
if (time > timestamp) continue;
/* Average same-color neighbors, widening the search radius (1 then
   2) until at least one qualifying neighbor is found. */
for (tot=n=0, rad=1; rad < 3 && n==0; rad++)
for (r = row-rad; r <= row+rad; r++)
for (c = col-rad; c <= col+rad; c++)
if ((unsigned) r < height && (unsigned) c < width &&
(r != row || c != col) && fcol(r,c) == fcol(row,col)) {
tot += BAYER2(r,c);
n++;
}
/* FIX: guard n==0 (no same-color neighbor within radius 2, e.g. on
   unusual CFA layouts) -- previously this divided by zero. */
if (n) BAYER2(row,col) = tot/n;
#ifdef DCRAW_VERBOSE
if (verbose) {
if (!fixed++)
fprintf (stderr,_("Fixed dead pixels at:"));
fprintf (stderr, " %d,%d", col, row);
}
#endif
}
#ifdef DCRAW_VERBOSE
if (fixed) fputc ('\n', stderr);
#endif
fclose (fp);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS,1,2);
#endif
}
/* Subtract a dark frame (a binary PGM file made from a dark exposure)
   from the raw image.  The PGM must be type "P5", match the image
   dimensions exactly, and have maxval 65535.  On success the global
   black levels (black, cblack[]) are reset to zero, since the offset
   has now been removed per-pixel. */
void CLASS subtract (const char *fname)
{
FILE *fp;
int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif
if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
return;
}
/* Parse the PGM header: magic "P5", then width, height, maxval
   (dim[0..2]), skipping '#' comments. */
if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
if (c == '#') comment = 1;
if (c == '\n') comment = 0;
if (comment) continue;
if (isdigit(c)) number = 1;
if (number) {
if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
else if (isspace(c)) {
number = 0; nd++;
} else error = 1;
}
}
if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
fclose (fp); return;
} else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
fclose (fp); return;
}
pixel = (ushort *) calloc (width, sizeof *pixel);
merror (pixel, "subtract()");
/* Subtract row by row, clamping at zero.  PGM samples are big-endian,
   hence ntohs().  NOTE(review): the fread return value is unchecked;
   a truncated file silently reuses the previous row's buffer. */
for (row=0; row < height; row++) {
fread (pixel, 2, width, fp);
for (col=0; col < width; col++)
BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
}
free (pixel);
fclose (fp);
memset (cblack, 0, sizeof cblack);
black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
//@end FILEIO
//@out COMMON
/* Two fixed 256-byte substitution tables.  NOTE(review): their
   consumers are outside this chunk (in dcraw they serve as keystream
   tables for decoding Nikon's encrypted metadata) -- confirm against
   the code that indexes xlat[] elsewhere in this file. */
static const uchar xlat[2][256] = {
{ 0xc1,0xbf,0x6d,0x0d,0x59,0xc5,0x13,0x9d,0x83,0x61,0x6b,0x4f,0xc7,0x7f,0x3d,0x3d,
0x53,0x59,0xe3,0xc7,0xe9,0x2f,0x95,0xa7,0x95,0x1f,0xdf,0x7f,0x2b,0x29,0xc7,0x0d,
0xdf,0x07,0xef,0x71,0x89,0x3d,0x13,0x3d,0x3b,0x13,0xfb,0x0d,0x89,0xc1,0x65,0x1f,
0xb3,0x0d,0x6b,0x29,0xe3,0xfb,0xef,0xa3,0x6b,0x47,0x7f,0x95,0x35,0xa7,0x47,0x4f,
0xc7,0xf1,0x59,0x95,0x35,0x11,0x29,0x61,0xf1,0x3d,0xb3,0x2b,0x0d,0x43,0x89,0xc1,
0x9d,0x9d,0x89,0x65,0xf1,0xe9,0xdf,0xbf,0x3d,0x7f,0x53,0x97,0xe5,0xe9,0x95,0x17,
0x1d,0x3d,0x8b,0xfb,0xc7,0xe3,0x67,0xa7,0x07,0xf1,0x71,0xa7,0x53,0xb5,0x29,0x89,
0xe5,0x2b,0xa7,0x17,0x29,0xe9,0x4f,0xc5,0x65,0x6d,0x6b,0xef,0x0d,0x89,0x49,0x2f,
0xb3,0x43,0x53,0x65,0x1d,0x49,0xa3,0x13,0x89,0x59,0xef,0x6b,0xef,0x65,0x1d,0x0b,
0x59,0x13,0xe3,0x4f,0x9d,0xb3,0x29,0x43,0x2b,0x07,0x1d,0x95,0x59,0x59,0x47,0xfb,
0xe5,0xe9,0x61,0x47,0x2f,0x35,0x7f,0x17,0x7f,0xef,0x7f,0x95,0x95,0x71,0xd3,0xa3,
0x0b,0x71,0xa3,0xad,0x0b,0x3b,0xb5,0xfb,0xa3,0xbf,0x4f,0x83,0x1d,0xad,0xe9,0x2f,
0x71,0x65,0xa3,0xe5,0x07,0x35,0x3d,0x0d,0xb5,0xe9,0xe5,0x47,0x3b,0x9d,0xef,0x35,
0xa3,0xbf,0xb3,0xdf,0x53,0xd3,0x97,0x53,0x49,0x71,0x07,0x35,0x61,0x71,0x2f,0x43,
0x2f,0x11,0xdf,0x17,0x97,0xfb,0x95,0x3b,0x7f,0x6b,0xd3,0x25,0xbf,0xad,0xc7,0xc5,
0xc5,0xb5,0x8b,0xef,0x2f,0xd3,0x07,0x6b,0x25,0x49,0x95,0x25,0x49,0x6d,0x71,0xc7 },
{ 0xa7,0xbc,0xc9,0xad,0x91,0xdf,0x85,0xe5,0xd4,0x78,0xd5,0x17,0x46,0x7c,0x29,0x4c,
0x4d,0x03,0xe9,0x25,0x68,0x11,0x86,0xb3,0xbd,0xf7,0x6f,0x61,0x22,0xa2,0x26,0x34,
0x2a,0xbe,0x1e,0x46,0x14,0x68,0x9d,0x44,0x18,0xc2,0x40,0xf4,0x7e,0x5f,0x1b,0xad,
0x0b,0x94,0xb6,0x67,0xb4,0x0b,0xe1,0xea,0x95,0x9c,0x66,0xdc,0xe7,0x5d,0x6c,0x05,
0xda,0xd5,0xdf,0x7a,0xef,0xf6,0xdb,0x1f,0x82,0x4c,0xc0,0x68,0x47,0xa1,0xbd,0xee,
0x39,0x50,0x56,0x4a,0xdd,0xdf,0xa5,0xf8,0xc6,0xda,0xca,0x90,0xca,0x01,0x42,0x9d,
0x8b,0x0c,0x73,0x43,0x75,0x05,0x94,0xde,0x24,0xb3,0x80,0x34,0xe5,0x2c,0xdc,0x9b,
0x3f,0xca,0x33,0x45,0xd0,0xdb,0x5f,0xf5,0x52,0xc3,0x21,0xda,0xe2,0x22,0x72,0x6b,
0x3e,0xd0,0x5b,0xa8,0x87,0x8c,0x06,0x5d,0x0f,0xdd,0x09,0x19,0x93,0xd0,0xb9,0xfc,
0x8b,0x0f,0x84,0x60,0x33,0x1c,0x9b,0x45,0xf1,0xf0,0xa3,0x94,0x3a,0x12,0x77,0x33,
0x4d,0x44,0x78,0x28,0x3c,0x9e,0xfd,0x65,0x57,0x16,0x94,0x6b,0xfb,0x59,0xd0,0xc8,
0x22,0x36,0xdb,0xd2,0x63,0x98,0x43,0xa1,0x04,0x87,0x86,0xf7,0xa6,0x26,0xbb,0xd6,
0x59,0x4d,0xbf,0x6a,0x2e,0xaa,0x2b,0xef,0xe6,0x78,0xb6,0x4e,0xe0,0x2f,0xdc,0x7c,
0xbe,0x57,0x19,0x32,0x7e,0x2a,0xd0,0xb8,0xba,0x29,0x00,0x3c,0x52,0x7d,0xa8,0x49,
0x3b,0x2d,0xeb,0x25,0x49,0xfa,0xa3,0xaa,0x39,0xa7,0xc5,0xa7,0x50,0x11,0x36,0xfb,
0xc6,0x67,0x4a,0xf5,0xa5,0x12,0x65,0x7e,0xb0,0xdf,0xaf,0x4e,0xb3,0x61,0x7f,0x2f } };
/* Solve gamma-curve parameters from power pwr and linear toe slope ts,
   and (unless mode==0) fill the 64K-entry curve[] lookup table for
   inputs 0..imax.  The six solved parameters are kept in g[0..5]:
   g[0]=power, g[1]=toe slope, g[2]/g[3]=transition points, g[4]=offset.
   mode==0 stores the parameters into gamm[] and returns without
   building curve[].  After the mode-- below, the nonzero/zero value of
   mode selects between the curve and its inverse -- NOTE(review): which
   direction is "encode" vs "decode" depends on caller convention;
   confirm at call sites. */
void CLASS gamma_curve (double pwr, double ts, int mode, int imax)
{
int i;
double g[6], bnd[2]={0,0}, r;
g[0] = pwr;
g[1] = ts;
g[2] = g[3] = g[4] = 0;
/* Bisect 48 times for the toe/power transition point g[2]; the
   boolean index picks which bracket bound to move. */
bnd[g[1] >= 1] = 1;
if (g[1] && (g[1]-1)*(g[0]-1) <= 0) {
for (i=0; i < 48; i++) {
g[2] = (bnd[0] + bnd[1])/2;
if (g[0]) bnd[(pow(g[2]/g[1],-g[0]) - 1)/g[0] - 1/g[2] > -1] = g[2];
else bnd[g[2]/exp(1-1/g[2]) < g[1]] = g[2];
}
g[3] = g[2] / g[1];
if (g[0]) g[4] = g[2] * (1/g[0] - 1);
}
/* g[5]: closed-form integral of the curve minus 1 (presumably an
   average-brightness term used by callers -- TODO confirm). */
if (g[0]) g[5] = 1 / (g[1]*SQR(g[3])/2 - g[4]*(1 - g[3]) +
(1 - pow(g[3],1+g[0]))*(1 + g[4])/(1 + g[0])) - 1;
else g[5] = 1 / (g[1]*SQR(g[3])/2 + 1
- g[2] - g[3] - g[2]*g[3]*(log(g[3]) - 1)) - 1;
if (!mode--) {
memcpy (gamm, g, sizeof gamm);
return;
}
/* Build the LUT; inputs above imax saturate at 0xffff. */
for (i=0; i < 0x10000; i++) {
curve[i] = 0xffff;
if ((r = (double) i / imax) < 1)
curve[i] = 0x10000 * ( mode
? (r < g[3] ? r*g[1] : (g[0] ? pow( r,g[0])*(1+g[4])-g[4] : log(r)*g[2]+1))
: (r < g[2] ? r/g[1] : (g[0] ? pow((r+g[4])/(1+g[4]),1/g[0]) : exp((r-1)/g[2]))));
}
}
/* Moore-Penrose pseudoinverse of the size x 3 matrix `in`:
   out = in * (in^T in)^-1, computed by Gauss-Jordan elimination on the
   3x3 Gram matrix augmented with the identity.  `out` must hold
   size rows of 3 doubles. */
void CLASS pseudoinverse (double (*in)[3], double (*out)[3], int size)
{
  double aug[3][6], pivot;
  int r, c, k;

  /* Build the augmented system [in^T*in | I]. */
  for (r=0; r < 3; r++)
    for (c=0; c < 6; c++) {
      aug[r][c] = (c == r+3);
      if (c < 3)
        for (k=0; k < size; k++)
          aug[r][c] += in[k][r] * in[k][c];
    }
  /* Gauss-Jordan: reduce the left half to the identity; the right
     half then holds (in^T*in)^-1.  No pivoting, as in the original. */
  for (r=0; r < 3; r++) {
    pivot = aug[r][r];
    for (c=0; c < 6; c++)
      aug[r][c] /= pivot;
    for (k=0; k < 3; k++) {
      if (k == r) continue;
      pivot = aug[k][r];
      for (c=0; c < 6; c++)
        aug[k][c] -= aug[r][c] * pivot;
    }
  }
  /* out = in * (in^T*in)^-1. */
  for (r=0; r < size; r++)
    for (c=0; c < 3; c++) {
      out[r][c] = 0;
      for (k=0; k < 3; k++)
        out[r][c] += aug[c][k+3] * in[r][k];
    }
}
/* Derive the RGB -> camera matrix (_rgb_cam, 3 x colors) from a
   camera -> XYZ matrix (cam_xyz).  Steps: multiply through the
   XYZ -> RGB primaries (xyz_rgb), normalize each camera row so that
   white (1,1,1) maps to all-ones (storing the reciprocal row sums in
   pre_mul[]), then pseudoinvert and transpose. */
void CLASS cam_xyz_coeff (float _rgb_cam[3][4], double cam_xyz[4][3])
{
double cam_rgb[4][3], inverse[4][3], num;
int i, j, k;
for (i=0; i < colors; i++) /* Multiply out XYZ colorspace */
for (j=0; j < 3; j++)
for (cam_rgb[i][j] = k=0; k < 3; k++)
cam_rgb[i][j] += cam_xyz[i][k] * xyz_rgb[k][j];
for (i=0; i < colors; i++) { /* Normalize cam_rgb so that */
for (num=j=0; j < 3; j++) /* cam_rgb * (1,1,1) is (1,1,1,1) */
num += cam_rgb[i][j];
if(num > 0.00001)
{
for (j=0; j < 3; j++)
cam_rgb[i][j] /= num;
pre_mul[i] = 1 / num;
}
else
{
/* Degenerate (near-zero) row: zero it and use a neutral
   multiplier instead of dividing by ~0. */
for (j=0; j < 3; j++)
cam_rgb[i][j] = 0.0;
pre_mul[i] = 1.0;
}
}
pseudoinverse (cam_rgb, inverse, colors);
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
_rgb_cam[i][j] = inverse[j][i];
}
#ifdef COLORCHECK
/* Calibration helper (compiled only with -DCOLORCHECK): given hand-set
   crop rectangles cut[] for the 24 GretagMacbeth ColorChecker squares,
   measure the average camera response of each square, solve for the
   cam_xyz matrix mapping camera values to the chart's published xyY
   values, and print it in dcraw's camera-constants table format.
   Sampled pixels are darkened in place to mark the measured areas. */
void CLASS colorcheck()
{
#define NSQ 24
// Coordinates of the GretagMacbeth ColorChecker squares
// width, height, 1st_column, 1st_row
int cut[NSQ][4]; // you must set these
// ColorChecker Chart under 6500-kelvin illumination
static const double gmb_xyY[NSQ][3] = {
{ 0.400, 0.350, 10.1 }, // Dark Skin
{ 0.377, 0.345, 35.8 }, // Light Skin
{ 0.247, 0.251, 19.3 }, // Blue Sky
{ 0.337, 0.422, 13.3 }, // Foliage
{ 0.265, 0.240, 24.3 }, // Blue Flower
{ 0.261, 0.343, 43.1 }, // Bluish Green
{ 0.506, 0.407, 30.1 }, // Orange
{ 0.211, 0.175, 12.0 }, // Purplish Blue
{ 0.453, 0.306, 19.8 }, // Moderate Red
{ 0.285, 0.202, 6.6 }, // Purple
{ 0.380, 0.489, 44.3 }, // Yellow Green
{ 0.473, 0.438, 43.1 }, // Orange Yellow
{ 0.187, 0.129, 6.1 }, // Blue
{ 0.305, 0.478, 23.4 }, // Green
{ 0.539, 0.313, 12.0 }, // Red
{ 0.448, 0.470, 59.1 }, // Yellow
{ 0.364, 0.233, 19.8 }, // Magenta
{ 0.196, 0.252, 19.8 }, // Cyan
{ 0.310, 0.316, 90.0 }, // White
{ 0.310, 0.316, 59.1 }, // Neutral 8
{ 0.310, 0.316, 36.2 }, // Neutral 6.5
{ 0.310, 0.316, 19.8 }, // Neutral 5
{ 0.310, 0.316, 9.0 }, // Neutral 3.5
{ 0.310, 0.316, 3.1 } }; // Black
double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
int c, i, j, k, sq, row, col, pass, count[4];
/* Average each square's pixels per CFA channel; mark sampled pixels
   by pulling them halfway toward black. */
memset (gmb_cam, 0, sizeof gmb_cam);
for (sq=0; sq < NSQ; sq++) {
FORCC count[c] = 0;
for (row=cut[sq][3]; row < cut[sq][3]+cut[sq][1]; row++)
for (col=cut[sq][2]; col < cut[sq][2]+cut[sq][0]; col++) {
c = FC(row,col);
if (c >= colors) c -= 2;
gmb_cam[sq][c] += BAYER2(row,col);
BAYER2(row,col) = black + (BAYER2(row,col)-black)/2;
count[c]++;
}
FORCC gmb_cam[sq][c] = gmb_cam[sq][c]/count[c] - black;
/* Convert the chart's published xyY to XYZ. */
gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
gmb_xyz[sq][1] = gmb_xyY[sq][2];
gmb_xyz[sq][2] = gmb_xyY[sq][2] *
(1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
}
/* Least-squares solve cam = cam_xyz * xyz, iterating twice with a
   white-balance correction from the Neutral 8 square (index 20). */
pseudoinverse (gmb_xyz, inverse, NSQ);
for (pass=0; pass < 2; pass++) {
for (raw_color = i=0; i < colors; i++)
for (j=0; j < 3; j++)
for (cam_xyz[i][j] = k=0; k < NSQ; k++)
cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
cam_xyz_coeff (rgb_cam, cam_xyz);
FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
for (sq=0; sq < NSQ; sq++)
FORCC gmb_cam[sq][c] *= balance[c];
}
/* Emit the matrix scaled by 10000, normalized so the Y row sums to
   10000, in the format of dcraw's adobe_coeff() table. */
if (verbose) {
printf (" { \"%s %s\", %d,\n\t{", make, model, black);
num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
FORCC for (j=0; j < 3; j++)
printf ("%c%d", (c | j) ? ',':' ', (int) (cam_xyz[c][j] * num + 0.5));
puts (" } },");
}
#undef NSQ
}
#endif
/* One level of the "hat" smoothing filter used by wavelet_denoise():
   temp[i] = 2*base[i] + base[i-sc] + base[i+sc], reading base with
   element stride st and reflecting indices at both ends.
   temp must hold size floats; sc is the level's sampling offset. */
void CLASS hat_transform (float *temp, float *base, int st, int size, int sc)
{
  int idx = 0;
  /* Left edge: idx-sc is out of range, reflect to sc-idx. */
  while (idx < sc) {
    temp[idx] = 2*base[st*idx] + base[st*(sc-idx)] + base[st*(idx+sc)];
    idx++;
  }
  /* Interior: both neighbors are in range. */
  while (idx+sc < size) {
    temp[idx] = 2*base[st*idx] + base[st*(idx-sc)] + base[st*(idx+sc)];
    idx++;
  }
  /* Right edge: idx+sc is out of range, reflect to 2*size-2-(idx+sc). */
  while (idx < size) {
    temp[idx] = 2*base[st*idx] + base[st*(idx-sc)] + base[st*(2*size-2-(idx+sc))];
    idx++;
  }
}
#if !defined(LIBRAW_USE_OPENMP)
/* Wavelet denoising (serial build).  Each channel is decomposed over
   five scales with hat_transform, detail coefficients below
   threshold*noise[lev] are soft-thresholded away, and the image is
   rebuilt.  Values are processed in sqrt encoding so the threshold is
   roughly uniform across brightness.  When colors==3 on a CFA image,
   the two greens (G1/G3) are denoised separately and then pulled
   toward each other at the end. */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
/* Rescale so maximum occupies the full 16-bit range. */
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
/* fimg holds 3 image-sized float planes plus a scratch row/column.
   If the image is too large, fimg stays NULL and merror reports the
   failed allocation (merror is defined elsewhere). */
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
FORC(nc) { /* denoise R,G1,B,G3 individually */
/* Work on 256*sqrt(value) so noise is ~uniform over brightness. */
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
/* lpass ping-pongs between planes 1 and 2; hpass is the
   previous level's smooth plane (0 on the first pass). */
lpass = size*((lev & 1)+1);
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
/* Soft-threshold the detail (high-pass) coefficients and
   accumulate the kept detail back into plane 0. */
thold = threshold * noise[lev];
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
/* Recombine detail + residual smooth plane and undo the sqrt. */
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++) {
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
/* window[] is a rolling 4-row cache of green rows (reuses fimg's
   storage as ushort scratch). */
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
/* Blend each green with the average of the four diagonal
   greens of the other green channel (black-compensated). */
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#else /* LIBRAW_USE_OPENMP */
/* Wavelet denoising, OpenMP variant: same algorithm as the serial
   version above, but each `omp for` splits the per-row/per-element
   loops across threads; `temp` is reallocated per thread since the
   shared scratch area at fimg+size*3 cannot be used concurrently.
   See the serial version for the algorithm description. */
void CLASS wavelet_denoise()
{
float *fimg=0, *temp, thold, mul[2], avg, diff;
int scale=1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] =
{ 0.8002,0.2735,0.1202,0.0585,0.0291,0.0152,0.0080,0.0044 };
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Wavelet denoising...\n"));
#endif
/* Rescale so maximum occupies the full 16-bit range. */
while (maximum << scale < 0x10000) scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight*iwidth) < 0x15550000)
fimg = (float *) malloc ((size*3 + iheight + iwidth) * sizeof *fimg);
merror (fimg, "wavelet_denoise()");
temp = fimg + size*3;
if ((nc = colors) == 3 && filters) nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i,col,row,thold,lev,lpass,hpass,temp,c) firstprivate(scale,size)
#endif
{
/* Per-thread scratch row/column (the shared tail of fimg would be
   racy); freed at the end of the parallel region. */
temp = (float*)malloc( (iheight + iwidth) * sizeof *fimg);
FORC(nc) { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass=lev=0; lev < 5; lev++) {
lpass = size*((lev & 1)+1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (row=0; row < iheight; row++) {
hat_transform (temp, fimg+hpass+row*iwidth, 1, iwidth, 1 << lev);
for (col=0; col < iwidth; col++)
fimg[lpass + row*iwidth + col] = temp[col] * 0.25;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (col=0; col < iwidth; col++) {
hat_transform (temp, fimg+lpass+col, iwidth, iheight, 1 << lev);
for (row=0; row < iheight; row++)
fimg[lpass + row*iwidth + col] = temp[row] * 0.25;
}
/* Soft-threshold the detail coefficients. */
thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++) {
fimg[hpass+i] -= fimg[lpass+i];
if (fimg[hpass+i] < -thold) fimg[hpass+i] += thold;
else if (fimg[hpass+i] > thold) fimg[hpass+i] -= thold;
else fimg[hpass+i] = 0;
if (hpass) fimg[i] += fimg[hpass+i];
}
hpass = lpass;
}
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
for (i=0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i]+fimg[lpass+i])/0x10000);
}
free(temp);
} /* end omp parallel */
/* The following loops are hard to parallelize: wlast carries a
 * row-to-row dependency.  The second part should be easier, but a
 * correct parallel version has not been worked out yet.
 */
if (filters && colors == 3) { /* pull G1 and G3 closer together */
for (row=0; row < 2; row++){
mul[row] = 0.125 * pre_mul[FC(row+1,0) | 1] / pre_mul[FC(row,0) | 1];
blk[row] = cblack[FC(row,0) | 1];
}
for (i=0; i < 4; i++)
window[i] = (ushort *) fimg + width*i;
for (wlast=-1, row=1; row < height-1; row++) {
while (wlast < row+1) {
for (wlast++, i=0; i < 4; i++)
window[(i+3) & 3] = window[i];
for (col = FC(wlast,1) & 1; col < width; col+=2)
window[2][col] = BAYER(wlast,col);
}
thold = threshold/512;
for (col = (FC(row,0) & 1)+1; col < width-1; col+=2) {
avg = ( window[0][col-1] + window[0][col+1] +
window[2][col-1] + window[2][col+1] - blk[~row & 1]*4 )
* mul[row & 1] + (window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row,col)) - avg;
if (diff < -thold) diff += thold;
else if (diff > thold) diff -= thold;
else diff = 0;
BAYER(row,col) = CLIP(SQR(avg+diff) + 0.5);
}
}
}
free (fimg);
}
#endif
// green equilibration
/* Equalize the two green channels (channel 1 vs channel 3) to reduce
   green-imbalance artifacts.  Works on a snapshot copy of image[]:
   for each channel-3 green site, compare the mean of the four diagonal
   channel-1 greens (m1) with the mean of the four nearest channel-3
   greens (m2); when both neighborhoods are smooth (contrast below thr
   of maximum) and the pixel is below ~95% of saturation, rescale the
   pixel by m1/m2.  Skipped entirely for half-size/shrunken output. */
void CLASS green_matching()
{
int i,j;
double m1,m2,c1,c2;
int o1_1,o1_2,o1_3,o1_4;
int o2_1,o2_2,o2_3,o2_4;
ushort (*img)[4];
const int margin = 3;
int oj = 2, oi = 2;
float f;
const float thr = 0.01f;
if(half_size || shrink) return;
/* Nudge (oj,oi) onto a channel-3 green site. */
if(FC(oj, oi) != 3) oj++;
if(FC(oj, oi) != 3) oi++;
if(FC(oj, oi) != 3) oj--;
/* Snapshot the image so reads are unaffected by in-place writes. */
img = (ushort (*)[4]) calloc (height*width, sizeof *image);
merror (img, "green_matching()");
memcpy(img,image,height*width*sizeof *image);
for(j=oj;j<height-margin;j+=2)
for(i=oi;i<width-margin;i+=2){
/* Diagonal channel-1 greens around the site. */
o1_1=img[(j-1)*width+i-1][1];
o1_2=img[(j-1)*width+i+1][1];
o1_3=img[(j+1)*width+i-1][1];
o1_4=img[(j+1)*width+i+1][1];
/* Nearest channel-3 greens (two rows/cols away). */
o2_1=img[(j-2)*width+i][3];
o2_2=img[(j+2)*width+i][3];
o2_3=img[j*width+i-2][3];
o2_4=img[j*width+i+2][3];
m1=(o1_1+o1_2+o1_3+o1_4)/4.0;
m2=(o2_1+o2_2+o2_3+o2_4)/4.0;
/* Mean absolute pairwise differences = local contrast measure. */
c1=(abs(o1_1-o1_2)+abs(o1_1-o1_3)+abs(o1_1-o1_4)+abs(o1_2-o1_3)+abs(o1_3-o1_4)+abs(o1_2-o1_4))/6.0;
c2=(abs(o2_1-o2_2)+abs(o2_1-o2_3)+abs(o2_1-o2_4)+abs(o2_2-o2_3)+abs(o2_3-o2_4)+abs(o2_2-o2_4))/6.0;
if((img[j*width+i][3]<maximum*0.95)&&(c1<maximum*thr)&&(c2<maximum*thr))
{
f = image[j*width+i][3]*m1/m2;
image[j*width+i][3]=f>0xffff?0xffff:f;
}
}
free(img);
}
/* White-balance and scale the raw data to the full 16-bit range.
   Chooses channel multipliers pre_mul[] from (in priority order)
   user_mul, auto white balance over greybox, or the camera's recorded
   multipliers; subtracts black levels; scales each channel; and
   optionally resamples red/blue to correct chromatic aberration
   (aber[]). */
void CLASS scale_colors()
{
unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
int val, dark, sat;
double dsum[8], dmin, dmax;
float scale_mul[4], fr, fc;
ushort *img=0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,0,2);
#endif
if (user_mul[0])
memcpy (pre_mul, user_mul, sizeof pre_mul);
/* Auto WB: average each channel over 8x8 blocks within greybox,
   skipping any block that contains a near-saturated sample. */
if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1)) {
memset (dsum, 0, sizeof dsum);
bottom = MIN (greybox[1]+greybox[3], height);
right = MIN (greybox[0]+greybox[2], width);
for (row=greybox[1]; row < bottom; row += 8)
for (col=greybox[0]; col < right; col += 8) {
memset (sum, 0, sizeof sum);
for (y=row; y < row+8 && y < bottom; y++)
for (x=col; x < col+8 && x < right; x++)
FORC4 {
if (filters) {
c = fcol(y,x);
val = BAYER2(y,x);
} else
val = image[y*width+x][c];
if (val > maximum-25) goto skip_block;
if ((val -= cblack[c]) < 0) val = 0;
sum[c] += val;
sum[c+4]++;
if (filters) break;
}
FORC(8) dsum[c] += sum[c];
skip_block: ;
}
/* Multiplier = count / sum, i.e. inverse of the channel mean. */
FORC4 if (dsum[c]) pre_mul[c] = dsum[c+4] / dsum[c];
}
/* Camera WB: derive multipliers from the white[] sample grid, or
   fall back to the recorded cam_mul[]. */
if (use_camera_wb && cam_mul[0] != -1) {
memset (sum, 0, sizeof sum);
for (row=0; row < 8; row++)
for (col=0; col < 8; col++) {
c = FC(row,col);
if ((val = white[row][col] - cblack[c]) > 0)
sum[c] += val;
sum[c+4]++;
}
#ifdef LIBRAW_LIBRARY_BUILD
if(load_raw == &LibRaw::nikon_load_sraw)
{
// Nikon sRAW: camera WB already applied:
pre_mul[0]=pre_mul[1]=pre_mul[2]=pre_mul[3]=1.0;
}
else
#endif
if (sum[0] && sum[1] && sum[2] && sum[3])
FORC4 pre_mul[c] = (float) sum[c+4] / sum[c];
else if (cam_mul[0] && cam_mul[2])
memcpy (pre_mul, cam_mul, sizeof pre_mul);
else
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: Cannot use camera white balance.\n"), ifname);
#endif
}
}
#ifdef LIBRAW_LIBRARY_BUILD
// Nikon sRAW, daylight
if (load_raw == &LibRaw::nikon_load_sraw
&& !use_camera_wb && !use_auto_wb
&& cam_mul[0] > 0.001f && cam_mul[1] > 0.001f && cam_mul[2] > 0.001f )
{
for(c=0;c<3;c++)
pre_mul[c]/=cam_mul[c];
}
#endif
/* Never leave a zero multiplier; channel 3 inherits green when the
   camera has fewer than four colors. */
if (pre_mul[1] == 0) pre_mul[1] = 1;
if (pre_mul[3] == 0) pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
dark = black;
sat = maximum;
if (threshold) wavelet_denoise();
maximum -= black;
/* Normalize pre_mul by its max (or min when clipping highlights) and
   fold in the 16-bit range expansion. */
for (dmin=DBL_MAX, dmax=c=0; c < 4; c++) {
if (dmin > pre_mul[c])
dmin = pre_mul[c];
if (dmax < pre_mul[c])
dmax = pre_mul[c];
}
if (!highlight) dmax = dmin;
FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
if (verbose) {
fprintf (stderr,
_("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
FORC4 fprintf (stderr, " %f", pre_mul[c]);
fputc ('\n', stderr);
}
#endif
/* Fold a trivial (1x1) spatial black pattern into the per-channel
   black levels so the scaling loop need not index it. */
if (filters > 1000 && (cblack[4]+1)/2 == 1 && (cblack[5]+1)/2 == 1) {
FORC4 cblack[FC(c/2,c%2)] +=
cblack[6 + c/2 % cblack[4] * cblack[5] + c%2 % cblack[5]];
cblack[4] = cblack[5] = 0;
}
size = iheight*iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
scale_colors_loop(scale_mul);
#else
/* Subtract (spatial + per-channel) black and scale, clipping to
   16 bits; iterates over all 4 components of every pixel. */
for (i=0; i < size*4; i++) {
if (!(val = image[0][i])) continue;
if (cblack[4] && cblack[5])
val -= cblack[6 + i/4 / iwidth % cblack[4] * cblack[5] +
i/4 % iwidth % cblack[5]];
val -= cblack[i & 3];
val *= scale_mul[i & 3];
image[0][i] = CLIP(val);
}
#endif
/* Chromatic aberration: radially resample red (c=0) and blue (c=2)
   by factor aber[c] with bilinear interpolation. */
if ((aber[0] != 1 || aber[2] != 1) && colors == 3) {
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Correcting chromatic aberration...\n"));
#endif
for (c=0; c < 4; c+=2) {
if (aber[c] == 1) continue;
img = (ushort *) malloc (size * sizeof *img);
merror (img, "scale_colors()");
for (i=0; i < size; i++)
img[i] = image[i][c];
for (row=0; row < iheight; row++) {
ur = fr = (row - iheight*0.5) * aber[c] + iheight*0.5;
if (ur > iheight-2) continue;
fr -= ur;
for (col=0; col < iwidth; col++) {
uc = fc = (col - iwidth*0.5) * aber[c] + iwidth*0.5;
if (uc > iwidth-2) continue;
fc -= uc;
pix = img + ur*iwidth + uc;
image[row*iwidth+col][c] =
(pix[ 0]*(1-fc) + pix[ 1]*fc) * (1-fr) +
(pix[iwidth]*(1-fc) + pix[iwidth+1]*fc) * fr;
}
}
free(img);
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS,1,2);
#endif
}
/* Prepare the image for demosaicing.  When shrink is set, either keep
   the half-size image (half_size) or expand it back to full size by
   scattering samples to their CFA positions.  For 2x2 Bayer patterns
   with three colors, either promote to four colors (G/G2 kept apart)
   or merge the second green into channel 1 and strip it from the
   filter pattern. */
void CLASS pre_interpolate()
{
ushort (*img)[4];
int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,0,2);
#endif
if (shrink) {
if (half_size) {
height = iheight;
width = iwidth;
if (filters == 9) {
/* X-Trans half-size: find the first R/B-empty position in the
   top-left 3x4 window (goto break2 exits both loops with row/col
   preserved), then fill those sites by averaging vertically. */
for (row=0; row < 3; row++)
for (col=1; col < 4; col++)
if (!(image[row*width+col][0] | image[row*width+col][2]))
goto break2; break2:
for ( ; row < height; row+=3)
for (col=(col-1)%3+1; col < width-1; col+=3) {
img = image + row*width+col;
for (c=0; c < 3; c+=2)
img[0][c] = (img[-1][c] + img[1][c]) >> 1;
}
}
} else {
/* Expand the half-size buffer to full size: each full-res CFA
   site takes its color from the corresponding half-res pixel. */
img = (ushort (*)[4]) calloc (height, width*sizeof *img);
merror (img, "pre_interpolate()");
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
c = fcol(row,col);
img[row*width+col][c] = image[(row >> 1)*iwidth+(col >> 1)][c];
}
free (image);
image = img;
shrink = 0;
}
}
if (filters > 1000 && colors == 3) {
mix_green = four_color_rgb ^ half_size;
if (four_color_rgb | half_size) colors++;
else {
/* Copy the second green (channel 3) into channel 1 on its rows
   and clear its bits from the filter pattern. */
for (row = FC(1,0) >> 1; row < height; row+=2)
for (col = FC(row,1) & 1; col < width; col+=2)
image[row*width+col][1] = image[row*width+col][3];
filters &= ~((filters & 0x55555555) << 1);
}
}
if (half_size) filters = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE,1,2);
#endif
}
/* Fill the missing colors of the `border`-pixel-wide frame around the
   image by averaging each color over the pixel's 3x3 neighborhood.
   Interior pixels are skipped by jumping col to width-border. */
void CLASS border_interpolate (int border)
{
unsigned row, col, y, x, f, c, sum[8];
for (row=0; row < height; row++)
for (col=0; col < width; col++) {
/* Once past the left border on a non-border row, skip straight
   to the right border. */
if (col==border && row >= border && row < height-border)
col = width-border;
memset (sum, 0, sizeof sum);
/* sum[0..3] accumulate values, sum[4..7] counts.  row-1/col-1
   wrap for row,col==0 (unsigned); the y<height / x<width tests
   reject those coordinates. */
for (y=row-1; y != row+2; y++)
for (x=col-1; x != col+2; x++)
if (y < height && x < width) {
f = fcol(y,x);
sum[f] += image[y*width+x][f];
sum[f+4]++;
}
f = fcol(row,col);
/* Keep the native color; fill every other color from averages. */
FORCC if (c != f && sum[c+4])
image[row*width+col][c] = sum[c] / sum[c+4];
}
}
/* Inner loop of bilinear interpolation: apply the precomputed
   instruction table `code` (built by lin_interpolate) to every
   interior pixel.  Each table entry holds a count followed by
   (offset, shift, color) triples to accumulate neighbors, then
   (color, multiplier) pairs to normalize the sums (fixed-point /256). */
void CLASS lin_interpolate_loop(int code[16][16][32],int size)
{
int row;
for (row=1; row < height-1; row++)
{
int col,*ip;
ushort *pix;
for (col=1; col < width-1; col++) {
int i;
int sum[4];
pix = image[row*width+col];
ip = code[row % size][col % size];
memset (sum, 0, sizeof sum);
/* *ip++ is the triple count; pix[ip[0]] indexes a neighbor
   relative to this pixel, shifted by the weight ip[1]. */
for (i=*ip++; i--; ip+=3)
sum[ip[2]] += pix[ip[0]] << ip[1];
/* Normalize each missing color (all except the native one). */
for (i=colors; --i; ip+=2)
pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
}
}
}
/* Bilinear demosaicing.  Precompute, for each position in the CFA
   period (16x16, or 6x6 for X-Trans), an instruction list describing
   which 3x3 neighbors contribute to each missing color and with what
   weight, then run lin_interpolate_loop over the interior.  Borders
   are handled separately by border_interpolate. */
void CLASS lin_interpolate()
{
int code[16][16][32], size=16, *ip, sum[4];
int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#endif
if (filters == 9) size = 6;
border_interpolate(1);
for (row=0; row < size; row++)
for (col=0; col < size; col++) {
ip = code[row][col]+1;
f = fcol(row,col);
memset (sum, 0, sizeof sum);
/* Emit one (offset, shift, color) triple per 3x3 neighbor of a
   different color; orthogonal neighbors get double weight. */
for (y=-1; y <= 1; y++)
for (x=-1; x <= 1; x++) {
shift = (y==0) + (x==0);
color = fcol(row+y,col+x);
if (color == f) continue;
*ip++ = (width*y + x)*4 + color;
*ip++ = shift;
*ip++ = color;
sum[color] += 1 << shift;
}
code[row][col][0] = (ip - code[row][col]) / 3;
/* Normalization pairs: fixed-point reciprocal of total weight. */
FORCC
if (c != f) {
*ip++ = c;
*ip++ = sum[c]>0?256 / sum[c]:0;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#endif
lin_interpolate_loop(code,size);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
/* Variable Number of Gradients demosaicing (see the comment block
   above for the reference).  Starts from a bilinear pass, then for
   each interior pixel computes eight directional gradients from a
   precompiled per-CFA-position instruction table, averages only the
   neighbors whose gradient is below an adaptive threshold, and writes
   results through a 3-row delay buffer so reads see original data. */
void CLASS vng_interpolate()
{
/* terms[]: each row is (y1,x1, y2,x2, weight, grads-bitmask): the
   absolute difference of the two named neighbors contributes (shifted
   by weight) to every gradient whose bit is set in the mask. */
static const signed char *cp, terms[] = {
-2,-2,+0,-1,0,0x01, -2,-2,+0,+0,1,0x01, -2,-1,-1,+0,0,0x01,
-2,-1,+0,-1,0,0x02, -2,-1,+0,+0,0,0x03, -2,-1,+0,+1,1,0x01,
-2,+0,+0,-1,0,0x06, -2,+0,+0,+0,1,0x02, -2,+0,+0,+1,0,0x03,
-2,+1,-1,+0,0,0x04, -2,+1,+0,-1,1,0x04, -2,+1,+0,+0,0,0x06,
-2,+1,+0,+1,0,0x02, -2,+2,+0,+0,1,0x04, -2,+2,+0,+1,0,0x04,
-1,-2,-1,+0,0,0x80, -1,-2,+0,-1,0,0x01, -1,-2,+1,-1,0,0x01,
-1,-2,+1,+0,1,0x01, -1,-1,-1,+1,0,0x88, -1,-1,+1,-2,0,0x40,
-1,-1,+1,-1,0,0x22, -1,-1,+1,+0,0,0x33, -1,-1,+1,+1,1,0x11,
-1,+0,-1,+2,0,0x08, -1,+0,+0,-1,0,0x44, -1,+0,+0,+1,0,0x11,
-1,+0,+1,-2,1,0x40, -1,+0,+1,-1,0,0x66, -1,+0,+1,+0,1,0x22,
-1,+0,+1,+1,0,0x33, -1,+0,+1,+2,1,0x10, -1,+1,+1,-1,1,0x44,
-1,+1,+1,+0,0,0x66, -1,+1,+1,+1,0,0x22, -1,+1,+1,+2,0,0x10,
-1,+2,+0,+1,0,0x04, -1,+2,+1,+0,1,0x04, -1,+2,+1,+1,0,0x04,
+0,-2,+0,+0,1,0x80, +0,-1,+0,+1,1,0x88, +0,-1,+1,-2,0,0x40,
+0,-1,+1,+0,0,0x11, +0,-1,+2,-2,0,0x40, +0,-1,+2,-1,0,0x20,
+0,-1,+2,+0,0,0x30, +0,-1,+2,+1,1,0x10, +0,+0,+0,+2,1,0x08,
+0,+0,+2,-2,1,0x40, +0,+0,+2,-1,0,0x60, +0,+0,+2,+0,1,0x20,
+0,+0,+2,+1,0,0x30, +0,+0,+2,+2,1,0x10, +0,+1,+1,+0,0,0x44,
+0,+1,+1,+2,0,0x10, +0,+1,+2,-1,1,0x40, +0,+1,+2,+0,0,0x60,
+0,+1,+2,+1,0,0x20, +0,+1,+2,+2,0,0x10, +1,-2,+1,+0,0,0x80,
+1,-1,+1,+1,0,0x88, +1,+0,+1,+2,0,0x08, +1,+0,+2,-1,0,0x40,
+1,+0,+2,+1,0,0x10
}, chood[] = { -1,-1, -1,0, -1,+1, 0,+1, +1,+1, +1,0, +1,-1, 0,-1 };
ushort (*brow[5])[4], *pix;
int prow=8, pcol=2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
int g, diff, thold, num, c;
lin_interpolate();
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("VNG interpolation...\n"));
#endif
/* CFA period: 8x2 for Bayer, 16x16 for filters==1, 6x6 for X-Trans. */
if (filters == 1) prow = pcol = 16;
if (filters == 9) prow = pcol = 6;
ip = (int *) calloc (prow*pcol, 1280);
merror (ip, "vng_interpolate()");
/* Compile terms[] into a flat instruction stream per CFA position:
   pairs of pixel offsets, a weight, and the gradient indices to
   bump, terminated by -1; INT_MAX ends the gradient section. */
for (row=0; row < prow; row++) /* Precalculate for VNG */
for (col=0; col < pcol; col++) {
code[row][col] = ip;
for (cp=terms, t=0; t < 64; t++) {
y1 = *cp++; x1 = *cp++;
y2 = *cp++; x2 = *cp++;
weight = *cp++;
grads = *cp++;
color = fcol(row+y1,col+x1);
if (fcol(row+y2,col+x2) != color) continue;
diag = (fcol(row,col+1) == color && fcol(row+1,col) == color) ? 2:1;
if (abs(y1-y2) == diag && abs(x1-x2) == diag) continue;
*ip++ = (y1*width + x1)*4 + color;
*ip++ = (y2*width + x2)*4 + color;
*ip++ = weight;
for (g=0; g < 8; g++)
if (grads & 1<<g) *ip++ = g;
*ip++ = -1;
}
*ip++ = INT_MAX;
/* Averaging section: for each of the 8 chood directions, the
   neighbor offset, plus a doubled-distance offset when the
   same-color sample lies two steps away (0 otherwise). */
for (cp=chood, g=0; g < 8; g++) {
y = *cp++; x = *cp++;
*ip++ = (y*width + x) * 4;
color = fcol(row,col);
if (fcol(row+y,col+x) != color && fcol(row+y*2,col+x*2) == color)
*ip++ = (y*width + x) * 8 + color;
else
*ip++ = 0;
}
}
/* brow[0..2] is a rolling 3-row output delay buffer so that reads
   always see pre-VNG data. */
brow[4] = (ushort (*)[4]) calloc (width*3, sizeof **brow);
merror (brow[4], "vng_interpolate()");
for (row=0; row < 3; row++)
brow[row] = brow[4] + row*width;
for (row=2; row < height-2; row++) { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
if(!((row-2)%256))RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,(row-2)/256+1,((height-3)/256)+1);
#endif
for (col=2; col < width-2; col++) {
pix = image[row*width+col];
ip = code[row % prow][col % pcol];
memset (gval, 0, sizeof gval);
while ((g = ip[0]) != INT_MAX) { /* Calculate gradients */
diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
gval[ip[3]] += diff;
ip += 5;
if ((g = ip[-1]) == -1) continue;
gval[g] += diff;
while ((g = *ip++) != -1)
gval[g] += diff;
}
ip++;
gmin = gmax = gval[0]; /* Choose a threshold */
for (g=1; g < 8; g++) {
if (gmin > gval[g]) gmin = gval[g];
if (gmax < gval[g]) gmax = gval[g];
}
if (gmax == 0) {
/* Flat area: keep the bilinear result unchanged. */
memcpy (brow[2][col], pix, sizeof *image);
continue;
}
thold = gmin + (gmax >> 1);
memset (sum, 0, sizeof sum);
color = fcol(row,col);
for (num=g=0; g < 8; g++,ip+=2) { /* Average the neighbors */
if (gval[g] <= thold) {
FORCC
if (c == color && ip[1])
sum[c] += (pix[c] + pix[ip[1]]) >> 1;
else
sum[c] += pix[ip[0] + c];
num++;
}
}
FORCC { /* Save to buffer */
t = pix[color];
if (c != color)
t += (sum[c] - sum[color]) / num;
brow[2][col][c] = CLIP(t);
}
}
if (row > 3) /* Write buffer to image */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
for (g=0; g < 4; g++)
brow[(g-1) & 3] = brow[g];
}
/* Flush the last two buffered rows. */
memcpy (image[(row-2)*width+2], brow[0]+2, (width-4)*sizeof *image);
memcpy (image[(row-1)*width+2], brow[1]+2, (width-4)*sizeof *image);
free (brow[4]);
free (code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
/*
   PPG demosaic: three sweeps over the global image[] mosaic.
   1) green at red/blue sites via gradient-selected pattern guesses,
   2) red/blue at green sites, 3) the opposite color at red/blue sites.
   Operates in place; border_interpolate() covers the 3-pixel margin
   that the 7x7 neighborhoods below cannot reach.
 */
void CLASS ppg_interpolate()
{
  /* dir[i] and dir[i+2] are opposite step offsets; the trailing 1 lets
     loops index dir[i+1] safely. */
  int dir[5] = { 1, width, -1, -width, 1 };
  int row, col, diff[2], guess[2], c, d, i;
  ushort (*pix)[4];
  border_interpolate(3);
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("PPG interpolation...\n"));
#endif
/*  Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,0,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=3; row < height-3; row++)
    for (col=3+(FC(row,3) & 1), c=FC(row,col); col < width-3; col+=2) {
      pix = image + row*width+col;
      /* Horizontal (i=0) and vertical (i=1) green guesses plus a
	 gradient score; the smaller-gradient direction wins. */
      for (i=0; (d=dir[i]) > 0; i++) {
	guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2
		      - pix[-2*d][c] - pix[2*d][c];
	diff[i] = ( ABS(pix[-2*d][c] - pix[ 0][c]) +
		    ABS(pix[ 2*d][c] - pix[ 0][c]) +
		    ABS(pix[ -d][1] - pix[ d][1]) ) * 3 +
		  ( ABS(pix[ 3*d][1] - pix[ d][1]) +
		    ABS(pix[-3*d][1] - pix[-d][1]) ) * 2;
      }
      d = dir[i = diff[0] > diff[1]];
      /* Clamp the guess between the two green neighbors along d. */
      pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
    }
/*  Calculate red and blue for each green pixel:		*/
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,1,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=1; row < height-1; row++)
    for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
      pix = image + row*width+col;
      /* c alternates between the two non-green colors (c = 2-c). */
      for (i=0; (d=dir[i]) > 0; c=2-c, i++)
	pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2*pix[0][1]
			- pix[-d][1] - pix[d][1]) >> 1);
    }
/*  Calculate blue for red pixels and vice versa:		*/
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE,2,3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row=1; row < height-1; row++)
    for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
      pix = image + row*width+col;
      /* d = dir[i]+dir[i+1] yields the two diagonal step offsets. */
      for (i=0; (d=dir[i]+dir[i+1]) > 0; i++) {
	diff[i] = ABS(pix[-d][c] - pix[d][c]) +
		  ABS(pix[-d][1] - pix[0][1]) +
		  ABS(pix[ d][1] - pix[0][1]);
	guess[i] = pix[-d][c] + pix[d][c] + 2*pix[0][1]
		 - pix[-d][1] - pix[d][1];
      }
      if (diff[0] != diff[1])
	pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
      else
	pix[0][c] = CLIP((guess[0]+guess[1]) >> 2);
    }
}
/*
   Convert one camera-space pixel to fixed-point CIELab (L,a,b each
   scaled by 64 into shorts).
   Special case: cielab(0,0) only (re)builds the lookup tables --
   the 16-bit cube-root curve and the camera->XYZ matrix -- and returns.
   In threaded builds the tables live in per-thread storage (tls).
 */
void CLASS cielab (ushort rgb[3], short lab[3])
{
  int c, i, j, k;
  float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
  static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
  if (!rgb) {
#ifndef LIBRAW_NOTHREADS
    /* NOTE(review): cbrt[0] < -1 presumably marks a not-yet-built
       per-thread table -- confirm against the tls initializer. */
    if(cbrt[0] < -1.0f)
#endif
      for (i=0; i < 0x10000; i++) {
	r = i / 65535.0;
	/* Standard CIE f(t): cube root above 0.008856, linear below. */
	cbrt[i] = r > 0.008856 ? pow(r,1.f/3.0f) : 7.787f*r + 16.f/116.0f;
      }
    for (i=0; i < 3; i++)
      for (j=0; j < colors; j++)
	for (xyz_cam[i][j] = k=0; k < 3; k++)
	  xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
    return;
  }
  /* 0.5 seeds each channel for round-to-nearest in the int cast below. */
  xyz[0] = xyz[1] = xyz[2] = 0.5;
  FORCC {
    xyz[0] += xyz_cam[0][c] * rgb[c];
    xyz[1] += xyz_cam[1][c] * rgb[c];
    xyz[2] += xyz_cam[2][c] * rgb[c];
  }
  xyz[0] = cbrt[CLIP((int) xyz[0])];
  xyz[1] = cbrt[CLIP((int) xyz[1])];
  xyz[2] = cbrt[CLIP((int) xyz[2])];
  lab[0] = 64 * (116 * xyz[1] - 16);
  lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
  lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row,col) xtrans[(row+6) % 6][(col+6) % 6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
/*
   Markesteijn demosaic for the 6x6 Fuji X-Trans CFA.
   Works tile-by-tile (TS x TS) with overlapping 16-pixel borders:
   interpolates green along 4 (or 8, when passes > 1) directions,
   derives red/blue, converts each candidate to CIELab, scores local
   homogeneity per direction, and averages the most homogeneous
   candidates into the final image.
 */
void CLASS xtrans_interpolate (int passes)
{
  int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
  int val, ndir, pass, hm[8], avg[4], color[3][8];
  /* orth/patt encode the hexagon neighbor geometry of the pattern;
     dir[] are the four scan directions inside a tile. */
  static const short orth[12] = { 1,0,0,1,-1,0,0,-1,1,0,0,1 },
	patt[2][16] = { { 0,1,0,-1,2,0,-1,0,1,1,1,-1,0,0,0,0 },
			{ 0,1,0,-2,1,0,-2,0,1,1,-2,-2,1,-1,-1,1 } },
	dir[4] = { 1,TS,TS+1,TS-1 };
  short allhex[3][3][2][8], *hex;
  ushort min, max, sgrow, sgcol;
  ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
   short (*lab) [TS][3], (*lix)[3];
  float (*drv)[TS][TS], diff[6], tr;
  char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf (stderr,_("%d-pass X-Trans interpolation...\n"), passes);
#endif
  cielab (0,0);		/* build the CIELab lookup tables once */
  ndir = 4 << (passes > 1);
  /* One allocation carved into rgb / lab / drv / homo sections. */
  buffer = (char *) malloc (TS*TS*(ndir*11+6));
  merror (buffer, "xtrans_interpolate()");
  rgb  = (ushort(*)[TS][TS][3]) buffer;
  lab  = (short (*)    [TS][3])(buffer + TS*TS*(ndir*6));
  drv  = (float (*)[TS][TS])   (buffer + TS*TS*(ndir*6+6));
  homo = (char  (*)[TS][TS])   (buffer + TS*TS*(ndir*10+6));
/* Map a green hexagon around each non-green pixel and vice versa:	*/
  for (row=0; row < 3; row++)
    for (col=0; col < 3; col++)
      for (ng=d=0; d < 10; d+=2) {
	g = fcol(row,col) == 1;
	if (fcol(row+orth[d],col+orth[d+2]) == 1) ng=0; else ng++;
	/* ng==4 identifies the solitary-green anchor position. */
	if (ng == 4) { sgrow = row; sgcol = col; }
	if (ng == g+1) FORC(8) {
	  v = orth[d  ]*patt[g][c*2] + orth[d+1]*patt[g][c*2+1];
	  h = orth[d+2]*patt[g][c*2] + orth[d+3]*patt[g][c*2+1];
	  /* [0]: offsets in image coords, [1]: offsets in tile coords. */
	  allhex[row][col][0][c^(g*2 & d)] = h + v*width;
	  allhex[row][col][1][c^(g*2 & d)] = h + v*TS;
	}
      }
/* Set green1 and green3 to the minimum and maximum allowed values:	*/
  for (row=2; row < height-2; row++)
    for (min=~(max=0), col=2; col < width-2; col++) {
      if (fcol(row,col) == 1 && (min=~(max=0))) continue;
      pix = image + row*width + col;
      hex = allhex[row % 3][col % 3][0];
      if (!max) FORC(6) {
	val = pix[hex[c]][1];
	if (min > val) min = val;
	if (max < val) max = val;
      }
      /* Channels 1/3 abused as per-pixel clamp bounds for green. */
      pix[0][1] = min;
      pix[0][3] = max;
      switch ((row-sgrow) % 3) {
	case 1: if (row < height-3) { row++; col--; } break;
	case 2: if ((min=~(max=0)) && (col+=2) < width-3 && row > 2) row--;
      }
    }
  for (top=3; top < height-19; top += TS-16)
    for (left=3; left < width-19; left += TS-16) {
      mrow = MIN (top+TS, height-3);
      mcol = MIN (left+TS, width-3);
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++)
	  memcpy (rgb[0][row-top][col-left], image[row*width+col], 6);
      FORC3 memcpy (rgb[c+1], rgb[0], sizeof *rgb);
/* Interpolate green horizontally, vertically, and along both diagonals: */
      for (row=top; row < mrow; row++)
	for (col=left; col < mcol; col++) {
	  if ((f = fcol(row,col)) == 1) continue;
	  pix = image + row*width + col;
	  hex = allhex[row % 3][col % 3][0];
	  color[1][0] = 174 * (pix[  hex[1]][1] + pix[  hex[0]][1]) -
			 46 * (pix[2*hex[1]][1] + pix[2*hex[0]][1]);
	  color[1][1] = 223 *  pix[  hex[3]][1] + pix[  hex[2]][1] * 33 +
			 92 * (pix[      0 ][f] - pix[ -hex[2]][f]);
	  FORC(2) color[1][2+c] =
		164 * pix[hex[4+c]][1] + 92 * pix[-2*hex[4+c]][1] + 33 *
		(2*pix[0][f] - pix[3*hex[4+c]][f] - pix[-3*hex[4+c]][f]);
	  FORC4 rgb[c^!((row-sgrow) % 3)][row-top][col-left][1] =
		LIM(color[1][c] >> 8,pix[0][1],pix[0][3]);
	}
      for (pass=0; pass < passes; pass++) {
	if (pass == 1)
	  memcpy (rgb+=4, buffer, 4*sizeof *rgb);
/* Recalculate green from interpolated values of closer pixels:	*/
	if (pass) {
	  for (row=top+2; row < mrow-2; row++)
	    for (col=left+2; col < mcol-2; col++) {
	      if ((f = fcol(row,col)) == 1) continue;
	      pix = image + row*width + col;
	      hex = allhex[row % 3][col % 3][1];
	      for (d=3; d < 6; d++) {
		rix = &rgb[(d-2)^!((row-sgrow) % 3)][row-top][col-left];
		val = rix[-2*hex[d]][1] + 2*rix[hex[d]][1]
		    - rix[-2*hex[d]][f] - 2*rix[hex[d]][f] + 3*rix[0][f];
		rix[0][1] = LIM(val/3,pix[0][1],pix[0][3]);
	      }
	    }
	}
/* Interpolate red and blue values for solitary green pixels:	*/
	for (row=(top-sgrow+4)/3*3+sgrow; row < mrow-2; row+=3)
	  for (col=(left-sgcol+4)/3*3+sgcol; col < mcol-2; col+=3) {
	    rix = &rgb[0][row-top][col-left];
	    h = fcol(row,col+1);
	    memset (diff, 0, sizeof diff);
	    for (i=1, d=0; d < 6; d++, i^=TS^1, h^=2) {
	      for (c=0; c < 2; c++, h^=2) {
		g = 2*rix[0][1] - rix[i<<c][1] - rix[-i<<c][1];
		color[h][d] = g + rix[i<<c][h] + rix[-i<<c][h];
		if (d > 1)
		  diff[d] += SQR (rix[i<<c][1] - rix[-i<<c][1]
				- rix[i<<c][h] + rix[-i<<c][h]) + SQR(g);
	      }
	      if (d > 1 && (d & 1))
		if (diff[d-1] < diff[d])
		  FORC(2) color[c*2][d] = color[c*2][d-1];
	      if (d < 2 || (d & 1)) {
		FORC(2) rix[0][c*2] = CLIP(color[c*2][d]/2);
		rix += TS*TS;
	      }
	    }
	  }
/* Interpolate red for blue pixels and vice versa:		*/
	for (row=top+3; row < mrow-3; row++)
	  for (col=left+3; col < mcol-3; col++) {
	    if ((f = 2-fcol(row,col)) == 1) continue;
	    rix = &rgb[0][row-top][col-left];
	    c = (row-sgrow) % 3 ? TS:1;
	    h = 3 * (c ^ TS ^ 1);
	    for (d=0; d < 4; d++, rix += TS*TS) {
	      i = d > 1 || ((d ^ c) & 1) ||
		 ((ABS(rix[0][1]-rix[c][1])+ABS(rix[0][1]-rix[-c][1])) <
		2*(ABS(rix[0][1]-rix[h][1])+ABS(rix[0][1]-rix[-h][1]))) ? c:h;
	      rix[0][f] = CLIP((rix[i][f] + rix[-i][f] +
		  2*rix[0][1] - rix[i][1] - rix[-i][1])/2);
	    }
	  }
/* Fill in red and blue for 2x2 blocks of green:		*/
	for (row=top+2; row < mrow-2; row++) if ((row-sgrow) % 3)
	  for (col=left+2; col < mcol-2; col++) if ((col-sgcol) % 3) {
	    rix = &rgb[0][row-top][col-left];
	    hex = allhex[row % 3][col % 3][1];
	    for (d=0; d < ndir; d+=2, rix += TS*TS)
	      if (hex[d] + hex[d+1]) {
		g = 3*rix[0][1] - 2*rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + 2*rix[hex[d]][c] + rix[hex[d+1]][c])/3);
	      } else {
		g = 2*rix[0][1] - rix[hex[d]][1] - rix[hex[d+1]][1];
		for (c=0; c < 4; c+=2) rix[0][c] =
			CLIP((g + rix[hex[d]][c] + rix[hex[d+1]][c])/2);
	      }
	  }
      }
      rgb = (ushort(*)[TS][TS][3]) buffer;
      mrow -= top;
      mcol -= left;
/* Convert to CIELab and differentiate in all directions:	*/
      for (d=0; d < ndir; d++) {
	for (row=2; row < mrow-2; row++)
	  for (col=2; col < mcol-2; col++)
	    cielab (rgb[d][row][col], lab[row][col]);
	for (f=dir[d & 3],row=3; row < mrow-3; row++)
	  for (col=3; col < mcol-3; col++) {
	    lix = &lab[row][col];
	    g = 2*lix[0][0] - lix[f][0] - lix[-f][0];
	    drv[d][row][col] = SQR(g)
	      + SQR((2*lix[0][1] - lix[f][1] - lix[-f][1] + g*500/232))
	      + SQR((2*lix[0][2] - lix[f][2] - lix[-f][2] - g*500/580));
	  }
      }
/* Build homogeneity maps from the derivatives:			*/
      memset(homo, 0, ndir*TS*TS);
      for (row=4; row < mrow-4; row++)
	for (col=4; col < mcol-4; col++) {
	  for (tr=FLT_MAX, d=0; d < ndir; d++)
	    if (tr > drv[d][row][col])
		tr = drv[d][row][col];
	  tr *= 8;
	  for (d=0; d < ndir; d++)
	    for (v=-1; v <= 1; v++)
	      for (h=-1; h <= 1; h++)
		if (drv[d][row+v][col+h] <= tr)
		  homo[d][row][col]++;
	}
/* Average the most homogeneous pixels for the final result:	*/
      if (height-top < TS+4) mrow = height-top+2;
      if (width-left < TS+4) mcol = width-left+2;
      for (row = MIN(top,8); row < mrow-8; row++)
	for (col = MIN(left,8); col < mcol-8; col++) {
	  for (d=0; d < ndir; d++)
	    for (hm[d]=0, v=-2; v <= 2; v++)
	      for (h=-2; h <= 2; h++)
		hm[d] += homo[d][row+v][col+h];
	  /* Suppress the weaker of each opposite-direction pair. */
	  for (d=0; d < ndir-4; d++)
	    if (hm[d] < hm[d+4]) hm[d  ] = 0; else
	    if (hm[d] > hm[d+4]) hm[d+4] = 0;
	  for (max=hm[0],d=1; d < ndir; d++)
	    if (max < hm[d]) max = hm[d];
	  max -= max >> 3;
	  memset (avg, 0, sizeof avg);
	  for (d=0; d < ndir; d++)
	    if (hm[d] >= max) {
	      FORC3 avg[c] += rgb[d][row][col][c];
	      avg[3]++;
	    }
	  FORC3 image[(row+top)*width+col+left][c] = avg[c]/avg[3];
	}
    }
  free(buffer);
  border_interpolate(8);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
#ifdef LIBRAW_LIBRARY_BUILD
/*
   AHD step 1: for every non-green CFA site in the tile anchored at
   (top,left), compute two candidate green values -- out_rgb[0] from the
   horizontal neighbors, out_rgb[1] from the vertical ones -- each
   clamped between the two adjacent measured greens.
 */
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
  int row, col;
  int c, val;
  ushort (*pix)[4];
  const int rowlimit = MIN(top+TS, height-2);
  const int collimit = MIN(left+TS, width-2);
  for (row = top; row < rowlimit; row++) {
    /* Start on the first non-green column of this row; c is that
       site's CFA color (red or blue). */
    col = left + (FC(row,left) & 1);
    for (c = FC(row,col); col < collimit; col+=2) {
      pix = image + row*width+col;
      val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
	     - pix[-2][c] - pix[2][c]) >> 2;
      out_rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
      val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
	     - pix[-2*width][c] - pix[2*width][c]) >> 2;
      out_rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
    }
  }
}
/*
   AHD step 2 (one direction): fill red and blue into a tile whose green
   plane is already interpolated, then convert each pixel to CIELab for
   the later homogeneity comparison.
   inout_rgb/out_lab are single-direction TS x TS tiles.
 */
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3], short (*out_lab)[TS][3])
{
  unsigned row, col;
  int c, val;
  ushort (*pix)[4];
  ushort (*rix)[3];
  short (*lix)[3];
  float xyz[3];
  const unsigned num_pix_per_row = 4*width;	/* image[] stride in ushorts */
  const unsigned rowlimit = MIN(top+TS-1, height-3);
  const unsigned collimit = MIN(left+TS-1, width-3);
  ushort *pix_above;
  ushort *pix_below;
  int t1, t2;
  for (row = top+1; row < rowlimit; row++) {
    pix = image + row*width + left;
    rix = &inout_rgb[row-top][0];
    lix = &out_lab[row-top][0];
    for (col = left+1; col < collimit; col++) {
      pix++;
      pix_above = &pix[0][0] - num_pix_per_row;
      pix_below = &pix[0][0] + num_pix_per_row;
      rix++;
      lix++;
      c = 2 - FC(row, col);
      if (c == 1) {
	/* Green site: the missing reds/blues come from the horizontal
	   and vertical neighbor pairs. */
	c = FC(row+1,col);
	t1 = 2-c;
	val = pix[0][1] + (( pix[-1][t1] + pix[1][t1]
			     - rix[-1][1] - rix[1][1] ) >> 1);
	rix[0][t1] = CLIP(val);
	val = pix[0][1] + (( pix_above[c] + pix_below[c]
			     - rix[-TS][1] - rix[TS][1] ) >> 1);
      } else {
	t1 = -4+c; /* -4+c: pixel of color c to the left */
	t2 = 4+c;  /* 4+c: pixel of color c to the right */
	val = rix[0][1] + (( pix_above[t1] + pix_above[t2]
			     + pix_below[t1] + pix_below[t2]
			     - rix[-TS-1][1] - rix[-TS+1][1]
			     - rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
      }
      rix[0][c] = CLIP(val);
      /* Keep the measured CFA sample untouched. */
      c = FC(row,col);
      rix[0][c] = pix[0][c];
      cielab(rix[0],lix[0]);
    }
  }
}
/*
   AHD step 2 driver: run the red/blue fill-in + CIELab conversion for
   both candidate green planes (index 0 = horizontal, 1 = vertical).
 */
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3], short (*out_lab)[TS][TS][3])
{
  int d = 0;
  while (d < 2) {
    ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[d], out_lab[d]);
    ++d;
  }
}
/*
   AHD step 3: score each tile pixel's homogeneity for both candidate
   directions.  A neighbor counts when its luminance and chrominance
   distances stay within adaptive thresholds (leps/abeps) derived from
   the tighter of the two directions, per Hirakawa & Parks.
 */
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3], char (*out_homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int direction;
  int i;
  short (*lix)[3];
  short (*lixs[2])[3];
  short *adjacent_lix;
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  /* Left, right, up, down in tile coordinates. */
  static const int dir[4] = { -1, 1, -TS, TS };
  const int rowlimit = MIN(top+TS-2, height-4);
  const int collimit = MIN(left+TS-2, width-4);
  int homogeneity;
  char (*homogeneity_map_p)[2];
  memset (out_homogeneity_map, 0, 2*TS*TS);
  for (row=top+2; row < rowlimit; row++) {
    tr = row-top;
    /* Pointers start one column early; the ++ below lands on col. */
    homogeneity_map_p = &out_homogeneity_map[tr][1];
    for (direction=0; direction < 2; direction++) {
      lixs[direction] = &lab[direction][tr][1];
    }
    for (col=left+2; col < collimit; col++) {
      tc = col-left;
      homogeneity_map_p++;
      for (direction=0; direction < 2; direction++) {
	lix = ++lixs[direction];
	for (i=0; i < 4; i++) {
	  adjacent_lix = lix[dir[i]];
	  ldiff[direction][i] = ABS(lix[0][0]-adjacent_lix[0]);
	  abdiff[direction][i] = SQR(lix[0][1]-adjacent_lix[1])
	    + SQR(lix[0][2]-adjacent_lix[2]);
	}
      }
      leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
		 MAX(ldiff[1][2],ldiff[1][3]));
      abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
		  MAX(abdiff[1][2],abdiff[1][3]));
      for (direction=0; direction < 2; direction++) {
	homogeneity = 0;
	for (i=0; i < 4; i++) {
	  if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps) {
	    homogeneity++;
	  }
	}
	homogeneity_map_p[0][direction] = homogeneity;
      }
    }
  }
}
/*
   AHD step 4: write the winner into the global image[].  For each pixel
   the 3x3-summed homogeneity decides between the horizontal and
   vertical candidate; on a tie the two candidates are averaged.
 */
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3], char (*homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int i, j;
  int direction;
  int hm[2];
  int c;
  const int rowlimit = MIN(top+TS-3, height-5);
  const int collimit = MIN(left+TS-3, width-5);
  ushort (*pix)[4];
  ushort (*rix[2])[3];
  for (row=top+3; row < rowlimit; row++) {
    tr = row-top;
    /* Pointers start one column early; the ++ below lands on col. */
    pix = &image[row*width+left+2];
    for (direction = 0; direction < 2; direction++) {
      rix[direction] = &rgb[direction][tr][2];
    }
    for (col=left+3; col < collimit; col++) {
      tc = col-left;
      pix++;
      for (direction = 0; direction < 2; direction++) {
	rix[direction]++;
      }
      /* Sum homogeneity over the 3x3 neighborhood per direction. */
      for (direction=0; direction < 2; direction++) {
	hm[direction] = 0;
	for (i=tr-1; i <= tr+1; i++) {
	  for (j=tc-1; j <= tc+1; j++) {
	    hm[direction] += homogeneity_map[i][j][direction];
	  }
	}
      }
      if (hm[0] != hm[1]) {
	memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
      } else {
	FORC3 {
	  pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1;
	}
      }
    }
  }
}
/*
   AHD demosaic driver (LibRaw build): tiles the image with 6-pixel
   overlap and runs the four AHD stages per tile, optionally spreading
   tile rows across OpenMP threads (each thread owns its own buffer).
   A progress callback returning non-zero sets terminate_flag, which
   stops further tiles and raises a cancellation exception at the end.
 */
void CLASS ahd_interpolate()
{
  /* i, j, k, xyz_cam and r are only referenced by the OpenMP
     private/shared clauses below -- kept for that reason. */
  int i, j, k, top, left;
  float xyz_cam[3][4],r;
  char *buffer;
  ushort (*rgb)[TS][TS][3];
  short (*lab)[TS][TS][3];
  char (*homo)[TS][2];
  int terminate_flag = 0;
  cielab(0,0);
  border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer,rgb,lab,homo,top,left,i,j,k) shared(xyz_cam,terminate_flag)
#endif
#endif
  {
    /* Per-thread scratch: rgb (12), lab (12) and homo (2) TS*TS planes. */
    buffer = (char *) malloc (26*TS*TS); /* 1664 kB */
    merror (buffer, "ahd_interpolate()");
    rgb  = (ushort(*)[TS][TS][3]) buffer;
    lab  = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
    homo = (char  (*)[TS][2])    (buffer + 24*TS*TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
    for (top=2; top < height-5; top += TS-6){
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
      /* Only thread 0 reports progress to avoid callback races. */
      if(0== omp_get_thread_num())
#endif
        if(callbacks.progress_cb) {
          int rr = (*callbacks.progress_cb)(callbacks.progresscb_data,LIBRAW_PROGRESS_INTERPOLATE,top-2,height-7);
          if(rr)
            terminate_flag = 1;
        }
#endif
      for (left=2; !terminate_flag && (left < width-5); left += TS-6) {
        ahd_interpolate_green_h_and_v(top, left, rgb);
        ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
        ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
        ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
      }
    }
    free (buffer);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if(terminate_flag)
    throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
/*
   AHD demosaic (single-threaded dcraw variant): per tile, interpolate
   green along both axes, fill red/blue and convert to CIELab, build
   per-direction homogeneity maps, then keep the more homogeneous
   candidate (or the average on a tie).
 */
void CLASS ahd_interpolate()
{
  int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
  /* Left, right, up, down in tile coordinates. */
  static const int dir[4] = { -1, 1, -TS, TS };
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  ushort (*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
   short (*lab)[TS][TS][3], (*lix)[3];
  char (*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("AHD interpolation...\n"));
#endif
  cielab (0,0);
  border_interpolate(5);
  /* One allocation carved into rgb / lab / homo sections. */
  buffer = (char *) malloc (26*TS*TS);
  merror (buffer, "ahd_interpolate()");
  rgb  = (ushort(*)[TS][TS][3]) buffer;
  lab  = (short (*)[TS][TS][3])(buffer + 12*TS*TS);
  homo = (char  (*)[TS][TS])   (buffer + 24*TS*TS);
  for (top=2; top < height-5; top += TS-6)
    for (left=2; left < width-5; left += TS-6) {
/*  Interpolate green horizontally and vertically:		*/
      for (row=top; row < top+TS && row < height-2; row++) {
	col = left + (FC(row,left) & 1);
	for (c = FC(row,col); col < left+TS && col < width-2; col+=2) {
	  pix = image + row*width+col;
	  val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2
		- pix[-2][c] - pix[2][c]) >> 2;
	  rgb[0][row-top][col-left][1] = ULIM(val,pix[-1][1],pix[1][1]);
	  val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2
		- pix[-2*width][c] - pix[2*width][c]) >> 2;
	  rgb[1][row-top][col-left][1] = ULIM(val,pix[-width][1],pix[width][1]);
	}
      }
/*  Interpolate red and blue, and convert to CIELab:		*/
      for (d=0; d < 2; d++)
	for (row=top+1; row < top+TS-1 && row < height-3; row++)
	  for (col=left+1; col < left+TS-1 && col < width-3; col++) {
	    pix = image + row*width+col;
	    rix = &rgb[d][row-top][col-left];
	    lix = &lab[d][row-top][col-left];
	    if ((c = 2 - FC(row,col)) == 1) {
	      c = FC(row+1,col);
	      val = pix[0][1] + (( pix[-1][2-c] + pix[1][2-c]
				- rix[-1][1] - rix[1][1] ) >> 1);
	      rix[0][2-c] = CLIP(val);
	      val = pix[0][1] + (( pix[-width][c] + pix[width][c]
				- rix[-TS][1] - rix[TS][1] ) >> 1);
	    } else
	      val = rix[0][1] + (( pix[-width-1][c] + pix[-width+1][c]
				+ pix[+width-1][c] + pix[+width+1][c]
				- rix[-TS-1][1] - rix[-TS+1][1]
				- rix[+TS-1][1] - rix[+TS+1][1] + 1) >> 2);
	    rix[0][c] = CLIP(val);
	    /* Keep the measured CFA sample untouched. */
	    c = FC(row,col);
	    rix[0][c] = pix[0][c];
	    cielab (rix[0],lix[0]);
	  }
/*  Build homogeneity maps from the CIELab images:		*/
      memset (homo, 0, 2*TS*TS);
      for (row=top+2; row < top+TS-2 && row < height-4; row++) {
	tr = row-top;
	for (col=left+2; col < left+TS-2 && col < width-4; col++) {
	  tc = col-left;
	  for (d=0; d < 2; d++) {
	    lix = &lab[d][tr][tc];
	    for (i=0; i < 4; i++) {
	      ldiff[d][i] = ABS(lix[0][0]-lix[dir[i]][0]);
	      abdiff[d][i] = SQR(lix[0][1]-lix[dir[i]][1])
			   + SQR(lix[0][2]-lix[dir[i]][2]);
	    }
	  }
	  leps = MIN(MAX(ldiff[0][0],ldiff[0][1]),
		     MAX(ldiff[1][2],ldiff[1][3]));
	  abeps = MIN(MAX(abdiff[0][0],abdiff[0][1]),
		      MAX(abdiff[1][2],abdiff[1][3]));
	  for (d=0; d < 2; d++)
	    for (i=0; i < 4; i++)
	      if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
		homo[d][tr][tc]++;
	}
      }
/*  Combine the most homogeneous pixels for the final result:	*/
      for (row=top+3; row < top+TS-3 && row < height-5; row++) {
	tr = row-top;
	for (col=left+3; col < left+TS-3 && col < width-5; col++) {
	  tc = col-left;
	  for (d=0; d < 2; d++)
	    for (hm[d]=0, i=tr-1; i <= tr+1; i++)
	      for (j=tc-1; j <= tc+1; j++)
		hm[d] += homo[d][i][j];
	  if (hm[0] != hm[1])
	    FORC3 image[row*width+col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
	  else
	    FORC3 image[row*width+col][c] =
		(rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
	}
      }
    }
  free (buffer);
}
#endif
#undef TS
/*
   Run med_passes rounds of a 3x3 median filter over the color
   difference planes R-G and B-G (suppresses demosaic speckle without
   blurring luminance).  Channel 3 of image[] is used as a scratch copy
   of the channel being filtered.
 */
void CLASS median_filter()
{
  ushort (*pix)[4];
  int pass, c, i, j, k, med[9];
  /* Pairwise compare/swap sequence that leaves the median in med[4]. */
  static const uchar opt[] =	/* Optimal 9-element median search */
  { 1,2, 4,5, 7,8, 0,1, 3,4, 6,7, 1,2, 4,5, 7,8,
    0,3, 5,8, 4,7, 3,6, 1,4, 2,5, 4,7, 4,2, 6,4, 4,2 };
  for (pass=1; pass <= med_passes; pass++) {
#ifdef LIBRAW_LIBRARY_BUILD
    RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER,pass-1,med_passes);
#endif
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf (stderr,_("Median filter pass %d...\n"), pass);
#endif
    for (c=0; c < 3; c+=2) {
      /* Snapshot the channel so the filter reads pre-pass values. */
      for (pix = image; pix < image+width*height; pix++)
	pix[0][3] = pix[0][c];
      for (pix = image+width; pix < image+width*(height-1); pix++) {
	/* Skip first/last column of each row. */
	if ((pix-image+1) % width < 2) continue;
	for (k=0, i = -width; i <= width; i += width)
	  for (j = i-1; j <= i+1; j++)
	    med[k++] = pix[j][3] - pix[j][1];
	for (i=0; i < sizeof opt; i+=2)
	  if (med[opt[i]] > med[opt[i+1]])
	    SWAP (med[opt[i]] , med[opt[i+1]]);
	pix[0][c] = CLIP(med[4] + pix[0][1]);
      }
    }
  }
}
/*
   Soften clipped highlights: for every pixel with at least one channel
   above the clip level, transform the raw and the clipped-to-clip
   versions into a luma/chroma basis, scale the raw chroma toward the
   clipped chroma ratio, and transform back.  Only defined for 3- and
   4-color images (trans/itrans have one matrix per case).
 */
void CLASS blend_highlights()
{
  int clip=INT_MAX, row, col, c, i, j;
  static const float trans[2][4][4] =
  { { { 1,1,1 }, { 1.7320508,-1.7320508,0 }, { -1,-1,2 } },
    { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
  static const float itrans[2][4][4] =
  { { { 1,0.8660254,-0.5 }, { 1,-0.8660254,-0.5 }, { 1,0,1 } },
    { { 1,1,1,1 }, { 1,-1,1,-1 }, { 1,1,-1,-1 }, { 1,-1,-1,1 } } };
  float cam[2][4], lab[2][4], sum[2], chratio;
  if ((unsigned) (colors-3) > 1) return;	/* only colors == 3 or 4 */
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,0,2);
#endif
  /* Clip level = lowest per-channel saturation after white balance. */
  FORCC if (clip > (i = 65535*pre_mul[c])) clip = i;
  for (row=0; row < height; row++)
    for (col=0; col < width; col++) {
      FORCC if (image[row*width+col][c] > clip) break;
      if (c == colors) continue;	/* nothing clipped here */
      FORCC {
	cam[0][c] = image[row*width+col][c];
	cam[1][c] = MIN(cam[0][c],clip);
      }
      for (i=0; i < 2; i++) {
	FORCC for (lab[i][c]=j=0; j < colors; j++)
	  lab[i][c] += trans[colors-3][c][j] * cam[i][j];
	for (sum[i]=0,c=1; c < colors; c++)
	  sum[i] += SQR(lab[i][c]);
      }
      chratio = sqrt(sum[1]/sum[0]);
      for (c=1; c < colors; c++)
	lab[0][c] *= chratio;
      FORCC for (cam[0][c]=j=0; j < colors; j++)
	cam[0][c] += itrans[colors-3][c][j] * lab[0][j];
      FORCC image[row*width+col][c] = cam[0][c] / colors;
    }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,1,2);
#endif
}
#define SCALE (4 >> shrink)
/*
   Reconstruct clipped channels from the least-clipped channel (kc).
   A downscaled ratio map (SCALE x SCALE cells) records channel/kc where
   both are usable, gaps are filled by iterative weighted diffusion from
   the 8 neighbors, and finally each clipped pixel is lifted to
   kc * ratio.  highlight (via grow) controls diffusion aggressiveness.
 */
void CLASS recover_highlights()
{
  float *map, sum, wgt, grow;
  int hsat[4], count, spread, change, val, i;
  unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
  ushort *pixel;
  static const signed char dir[8][2] =
    { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
#ifdef DCRAW_VERBOSE
  if (verbose) fprintf (stderr,_("Rebuilding highlights...\n"));
#endif
  grow = pow (2.0, 4-highlight);
  FORCC hsat[c] = 32000 * pre_mul[c];	/* near-saturation threshold */
  /* kc = channel with the largest multiplier, i.e. clipped last. */
  for (kc=0, c=1; c < colors; c++)
    if (pre_mul[kc] < pre_mul[c]) kc = c;
  high = height / SCALE;
  wide =  width / SCALE;
  map = (float *) calloc (high, wide*sizeof *map);
  merror (map, "recover_highlights()");
  FORCC if (c != kc) {
#ifdef LIBRAW_LIBRARY_BUILD
    RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS,c-1,colors-1);
#endif
    memset (map, 0, high*wide*sizeof *map);
    /* Seed the map with c/kc ratios of fully-saturated cells. */
    for (mrow=0; mrow < high; mrow++)
      for (mcol=0; mcol < wide; mcol++) {
	sum = wgt = count = 0;
	for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
	  for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
	    pixel = image[row*width+col];
	    if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000) {
	      sum += pixel[c];
	      wgt += pixel[kc];
	      count++;
	    }
	  }
	if (count == SCALE*SCALE)
	  map[mrow*wide+mcol] = sum / wgt;
      }
    /* Diffuse ratios outward; new values are stored negated so one
       sweep doesn't feed on its own output, then flipped positive. */
    for (spread = 32/grow; spread--; ) {
      for (mrow=0; mrow < high; mrow++)
	for (mcol=0; mcol < wide; mcol++) {
	  if (map[mrow*wide+mcol]) continue;
	  sum = count = 0;
	  for (d=0; d < 8; d++) {
	    y = mrow + dir[d][0];
	    x = mcol + dir[d][1];
	    if (y < high && x < wide && map[y*wide+x] > 0) {
	      sum += (1 + (d & 1)) * map[y*wide+x];
	      count += 1 + (d & 1);
	    }
	  }
	  if (count > 3)
	    map[mrow*wide+mcol] = - (sum+grow) / (count+grow);
	}
      for (change=i=0; i < high*wide; i++)
	if (map[i] < 0) {
	  map[i] = -map[i];
	  change = 1;
	}
      if (!change) break;
    }
    for (i=0; i < high*wide; i++)
      if (map[i] == 0) map[i] = 1;
    /* Apply: raise clipped pixels to kc * ratio. */
    for (mrow=0; mrow < high; mrow++)
      for (mcol=0; mcol < wide; mcol++) {
	for (row = mrow*SCALE; row < (mrow+1)*SCALE; row++)
	  for (col = mcol*SCALE; col < (mcol+1)*SCALE; col++) {
	    pixel = image[row*width+col];
	    if (pixel[c] / hsat[c] > 1) {
	      val = pixel[kc] * map[mrow*wide+mcol];
	      if (pixel[c] < val) pixel[c] = CLIP(val);
	    }
	  }
      }
  }
  free (map);
}
#undef SCALE
/*
   Read one TIFF IFD entry: tag, type, count, and the stream position
   to restore afterwards (*save).  When the value is larger than the
   4 inline bytes, follow the offset field so the caller reads the
   out-of-line data.
 */
void CLASS tiff_get (unsigned base,
	unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
  /* Byte width of each TIFF data type, encoded as ASCII digits;
     index 0 doubles as the fallback for out-of-range types. */
  static const char typewidth[] = "11124811248484";
  unsigned unitsize;
  *tag  = get2();
  *type = get2();
  *len  = get4();
  *save = ftell(ifp) + 4;
  unitsize = typewidth[*type < 14 ? *type : 0] - '0';
  if (*len * unitsize > 4)
    fseek (ifp, get4()+base, SEEK_SET);
}
/*
   Scan a TIFF-style directory for the thumbnail offset (tag toff) and
   length (tag tlen), storing them into thumb_offset / thumb_length.
 */
void CLASS parse_thumb_note (int base, unsigned toff, unsigned tlen)
{
  unsigned remaining, tag, type, len, save;
  for (remaining = get2(); remaining; remaining--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == toff) thumb_offset = get4()+base;
    if (tag == tlen) thumb_length = get4();
    fseek (ifp, save, SEEK_SET);	/* next entry */
  }
}
//@end COMMON
int CLASS parse_tiff_ifd (int base);
//@out COMMON
/* powf() guarded against huge or garbage exponents: any |b| beyond
   limup yields 0 instead of overflowing. */
static float powf_lim(float a, float b, float limup)
{
  if (b > limup || b < -limup)
    return 0.f;
  return powf(a, b);
}
/* powf() with the exponent clamped to +/-64, the sane range for the
   makernote EV arithmetic below. */
static float powf64(float a, float b)
{
  const float explimit = 64.f;
  return powf_lim(a, b, explimit);
}
#ifdef LIBRAW_LIBRARY_BUILD
/* Round to nearest integer, halves away from zero (portable stand-in
   for roundf()). */
static float my_roundf(float x) {
  const int negative = x < 0.0f;
  const float mag = negative ? -x : x;
  float r = ceilf(mag);
  if (r - mag > 0.5f)
    r -= 1.0f;
  return negative ? -r : r;
}
/*
   Decode Canon's signed 1/32-EV fixed-point value into a float EV.
   The low 5 bits of the magnitude carry the fraction; codes 0x0c and
   0x14 are special-cased to exact thirds (1/3 and 2/3 EV).
 */
static float _CanonConvert2EV(short in)
{
  short magnitude = in;
  float sgn = 1.0f;
  if (magnitude < 0) {
    magnitude = -magnitude;
    sgn = -1.0f;
  }
  const short lowbits = magnitude & 0x1f;
  float fraction;
  switch (lowbits) {
    case 0x0c: fraction = 32.0f / 3.0f; break;  /* 1/3 EV */
    case 0x14: fraction = 64.0f / 3.0f; break;  /* 2/3 EV */
    default:   fraction = (float)lowbits; break;
  }
  return sgn * ((float)(magnitude - lowbits) + fraction) / 32.0f;
}
/*
   Decode Canon's aperture encoding into an f-number: 2^(EV/2).
   0xffe0 is Canon's "no value" sentinel and maps to 0.0f.
 */
static float _CanonConvertAperture(short in)
{
  if (in == (short)0xffe0)
    return 0.0f;
  const float ev = _CanonConvert2EV(in);
  return powf64(2.0f, ev / 2.0f);
}
/*
   Record the Canon body ID and classify the camera by sensor format
   and lens mount.  Model IDs >= 0x80000000 are EOS bodies; anything
   not matched below is treated as a fixed-lens compact.
 */
void CLASS setCanonBodyFeatures (unsigned id)
{
  imgdata.lens.makernotes.CamID = id;

  /* APS-H EOS-1D series. */
  if ((id == 0x80000001) || // 1D
      (id == 0x80000174) || // 1D2
      (id == 0x80000232) || // 1D2N
      (id == 0x80000169) || // 1D3
      (id == 0x80000281))   // 1D4
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
    return;
  }

  /* Full-frame EF-mount bodies. */
  if ((id == 0x80000167) || // 1Ds
      (id == 0x80000188) || // 1Ds2
      (id == 0x80000215) || // 1Ds3
      (id == 0x80000213) || // 5D
      (id == 0x80000218) || // 5D2
      (id == 0x80000285) || // 5D3
      (id == 0x80000302) || // 6D
      (id == 0x80000269) || // 1DX
      (id == 0x80000324) || // 1DC
      (id == 0x80000382) || // 5DS
      (id == 0x80000401))   // 5DS R
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
    return;
  }

  /* APS-C mirrorless (EF-M mount). */
  if ((id == 0x80000331) || // M
      (id == 0x80000355))   // M2
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M;
    return;
  }

  /* Remaining EOS DSLRs (plus early D30/D60) are APS-C, EF mount;
     the lens mount itself is left unknown at this point. */
  if ((id == 0x01140000) || // D30
      (id == 0x01668000) || // D60
      (id > 0x80000000))
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown;
    return;
  }

  /* Everything else: fixed-lens compact. */
  imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
  imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
/*
   Extract lens data from Canon's binary CameraInfo makernote blob.
   Per-model byte offsets (as catalogued by ExifTool) locate the
   current focal length, lens ID, min/max focal lengths and, on some
   bodies, the lens name string.  Unmatched models keep all offsets at
   0; CameraInfo[0..1] are zeroed first so the fallback reads at
   offset 0 return 0 and leave the imgdata fields unset.
   NOTE: frees CameraInfo before returning -- the caller must not use
   the buffer afterwards.
 */
void CLASS processCanonCameraInfo (unsigned id, uchar *CameraInfo)
{
  ushort iCanonLensID = 0, iCanonMaxFocal = 0, iCanonMinFocal = 0, iCanonLens = 0, iCanonCurFocal = 0, iCanonFocalType = 0;
  CameraInfo[0] = 0;
  CameraInfo[1] = 0;
  switch (id) {
  case 0x80000001: // 1D
  case 0x80000167: // 1DS
    iCanonCurFocal = 10;
    iCanonLensID = 13;
    iCanonMinFocal = 14;
    iCanonMaxFocal = 16;
    /* These early bodies store focals little-endian, so read them here
       with sget2(); the shared sget2Rev() fallback below then leaves
       the already-set fields alone. */
    if (!imgdata.lens.makernotes.CurFocal)
      imgdata.lens.makernotes.CurFocal = sget2(CameraInfo + iCanonCurFocal);
    if (!imgdata.lens.makernotes.MinFocal)
      imgdata.lens.makernotes.MinFocal = sget2(CameraInfo + iCanonMinFocal);
    if (!imgdata.lens.makernotes.MaxFocal)
      imgdata.lens.makernotes.MaxFocal = sget2(CameraInfo + iCanonMaxFocal);
    break;
  case 0x80000174: // 1DMkII
  case 0x80000188: // 1DsMkII
    iCanonCurFocal = 9;
    iCanonLensID = 12;
    iCanonMinFocal = 17;
    iCanonMaxFocal = 19;
    iCanonFocalType = 45;
    break;
  case 0x80000232: // 1DMkII N
    iCanonCurFocal = 9;
    iCanonLensID = 12;
    iCanonMinFocal = 17;
    iCanonMaxFocal = 19;
    break;
  case 0x80000169: // 1DMkIII
  case 0x80000215: // 1DsMkIII
    iCanonCurFocal = 29;
    iCanonLensID = 273;
    iCanonMinFocal = 275;
    iCanonMaxFocal = 277;
    break;
  case 0x80000281: // 1DMkIV
    iCanonCurFocal = 30;
    iCanonLensID = 335;
    iCanonMinFocal = 337;
    iCanonMaxFocal = 339;
    break;
  case 0x80000269: // 1D X
    iCanonCurFocal = 35;
    iCanonLensID = 423;
    iCanonMinFocal = 425;
    iCanonMaxFocal = 427;
    break;
  case 0x80000213: // 5D
    iCanonCurFocal = 40;
    /* A zero at offset 12 means the lens ID lives at offset 151. */
    if (!sget2Rev(CameraInfo + 12)) iCanonLensID = 151;
    else iCanonLensID = 12;
    iCanonMinFocal = 147;
    iCanonMaxFocal = 149;
    break;
  case 0x80000218: // 5DMkII
    iCanonCurFocal = 30;
    iCanonLensID = 230;
    iCanonMinFocal = 232;
    iCanonMaxFocal = 234;
    break;
  case 0x80000285: // 5DMkIII
    iCanonCurFocal = 35;
    iCanonLensID = 339;
    iCanonMinFocal = 341;
    iCanonMaxFocal = 343;
    break;
  case 0x80000302: // 6D
    iCanonCurFocal = 35;
    iCanonLensID = 353;
    iCanonMinFocal = 355;
    iCanonMaxFocal = 357;
    break;
  case 0x80000250: // 7D
    iCanonCurFocal = 30;
    iCanonLensID = 274;
    iCanonMinFocal = 276;
    iCanonMaxFocal = 278;
    break;
  case 0x80000190: // 40D
    iCanonCurFocal = 29;
    iCanonLensID = 214;
    iCanonMinFocal = 216;
    iCanonMaxFocal = 218;
    iCanonLens = 2347;
    break;
  case 0x80000261: // 50D
    iCanonCurFocal = 30;
    iCanonLensID = 234;
    iCanonMinFocal = 236;
    iCanonMaxFocal = 238;
    break;
  case 0x80000287: // 60D
    iCanonCurFocal = 30;
    iCanonLensID = 232;
    iCanonMinFocal = 234;
    iCanonMaxFocal = 236;
    break;
  case 0x80000325: // 70D
    iCanonCurFocal = 35;
    iCanonLensID = 358;
    iCanonMinFocal = 360;
    iCanonMaxFocal = 362;
    break;
  case 0x80000176: // 450D
    iCanonCurFocal = 29;
    iCanonLensID = 222;
    iCanonLens = 2355;
    break;
  case 0x80000252: // 500D
    iCanonCurFocal = 30;
    iCanonLensID = 246;
    iCanonMinFocal = 248;
    iCanonMaxFocal = 250;
    break;
  case 0x80000270: // 550D
    iCanonCurFocal = 30;
    iCanonLensID = 255;
    iCanonMinFocal = 257;
    iCanonMaxFocal = 259;
    break;
  case 0x80000286: // 600D
  case 0x80000288: // 1100D
    iCanonCurFocal = 30;
    iCanonLensID = 234;
    iCanonMinFocal = 236;
    iCanonMaxFocal = 238;
    break;
  case 0x80000301: // 650D
  case 0x80000326: // 700D
    iCanonCurFocal = 35;
    iCanonLensID = 295;
    iCanonMinFocal = 297;
    iCanonMaxFocal = 299;
    break;
  case 0x80000254: // 1000D
    iCanonCurFocal = 29;
    iCanonLensID = 226;
    iCanonMinFocal = 228;
    iCanonMaxFocal = 230;
    iCanonLens = 2359;
    break;
  }
  if (iCanonFocalType)
    {
      imgdata.lens.makernotes.FocalType = CameraInfo[iCanonFocalType];
      if (!imgdata.lens.makernotes.FocalType)  // zero means 'fixed' here, replacing with standard '1'
	imgdata.lens.makernotes.FocalType = 1;
    }
  /* Shared big-endian fallback reads; each only fires if the field is
     still unset (offset 0 reads the zeroed header bytes -> 0). */
  if (!imgdata.lens.makernotes.CurFocal)
    imgdata.lens.makernotes.CurFocal = sget2Rev(CameraInfo + iCanonCurFocal);
  if (!imgdata.lens.makernotes.LensID)
    imgdata.lens.makernotes.LensID = sget2Rev(CameraInfo + iCanonLensID);
  if (!imgdata.lens.makernotes.MinFocal)
    imgdata.lens.makernotes.MinFocal = sget2Rev(CameraInfo + iCanonMinFocal);
  if (!imgdata.lens.makernotes.MaxFocal)
    imgdata.lens.makernotes.MaxFocal = sget2Rev(CameraInfo + iCanonMaxFocal);
  /* Parse the lens name: a leading byte < 'A' marks a non-Canon lens;
     otherwise split off the known family prefix (EF-S/TS-E/MP-E/EF-M,
     or plain EF) and record the matching mount. */
  if (!imgdata.lens.makernotes.Lens[0] && iCanonLens)	{
    if (CameraInfo[iCanonLens] < 65) // non-Canon lens
      memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 64);
    else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-S", 4))	{
      memcpy(imgdata.lens.makernotes.Lens, "EF-S ", 5);
      /* Fix: was "EF-E", inconsistent with the detected "EF-S" prefix
	 (all other branches store the prefix they matched). */
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-S", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "TS-E", 4))	{
      memcpy(imgdata.lens.makernotes.Lens, "TS-E ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "TS-E", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "MP-E", 4))	{
      memcpy(imgdata.lens.makernotes.Lens, "MP-E ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "MP-E", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-M", 4))	{
      memcpy(imgdata.lens.makernotes.Lens, "EF-M ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-M", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else	{
      memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 2);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF", 2);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      imgdata.lens.makernotes.Lens[2] = 32;	/* "EF" + space */
      memcpy(imgdata.lens.makernotes.Lens + 3, CameraInfo + iCanonLens + 2, 62);
    }
  }
  free(CameraInfo);
  return;
}
void CLASS processNikonLensData (uchar *LensData, unsigned len)
{
/*
  Decodes a Nikon LensData block (makernote tag 0x0098; the caller
  decrypts it first for versions >= 201) into imgdata.lens.
  Short blocks (< 20 bytes) hold packed focal/aperture bytes at a
  version-dependent offset; the big blocks (459/509/590/879 bytes)
  carry a textual lens name at a fixed offset.
  Takes ownership of LensData and frees it before returning.
*/
  ushort i;
  if (len < 20) {
    switch (len) {
    case 9:   // LensDataVersion 100
      i = 2;
      break;
    case 15:  // LensDataVersion 101/201/202/203
      i = 7;
      break;
    case 16:  // LensDataVersion 204
      i = 8;
      break;
    default:
      /* BUGFIX: for any other short length `i` was left uninitialized
         and then used as an index below (undefined behavior, possible
         out-of-bounds reads on corrupt files). Bail out instead. */
      free (LensData);
      return;
    }
    imgdata.lens.nikon.NikonLensIDNumber = LensData[i];
    imgdata.lens.nikon.NikonLensFStops = LensData[i + 1];
    imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops /12.0f;
    // focal lengths/apertures are stored logarithmically: value = 2^(byte/24)
    imgdata.lens.makernotes.MinFocal = 5.0f * powf64(2.0f, (float)LensData[i + 2] / 24.0f);
    imgdata.lens.makernotes.MaxFocal = 5.0f * powf64(2.0f, (float)LensData[i + 3] / 24.0f);
    imgdata.lens.makernotes.MaxAp4MinFocal = powf64(2.0f, (float)LensData[i + 4] / 24.0f);
    imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(2.0f, (float)LensData[i + 5] / 24.0f);
    imgdata.lens.nikon.NikonMCUVersion = LensData[i + 6];
    if (i != 2)  // these fields are absent from the 9-byte (v100) layout
    {
      imgdata.lens.makernotes.CurFocal = 5.0f * powf64(2.0f, (float)LensData[i - 1] / 24.0f);
      imgdata.lens.nikon.NikonEffectiveMaxAp = powf64(2.0f, (float)LensData[i + 7] / 24.0f);
    }
    // Composite 64-bit lens ID: seven signature bytes plus the lens type.
    imgdata.lens.makernotes.LensID =
      (unsigned long long) LensData[i] << 56 |
      (unsigned long long) LensData[i + 1] << 48 |
      (unsigned long long) LensData[i + 2] << 40 |
      (unsigned long long) LensData[i + 3] << 32 |
      (unsigned long long) LensData[i + 4] << 24 |
      (unsigned long long) LensData[i + 5] << 16 |
      (unsigned long long) LensData[i + 6] << 8 |
      (unsigned long long) imgdata.lens.nikon.NikonLensType;
  }
  else if ((len == 459) || (len == 590))
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 390, 64);
  }
  else if (len == 509)
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 391, 64);
  }
  else if (len == 879)
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 680, 64);
  }
  free (LensData);
  return;
}
void CLASS setOlympusBodyFeatures (unsigned long id)
{
/*
  Records the Olympus body ID and derives sensor format and mounts.
  The magic constants appear to be ASCII model codes packed into an
  integer ("D4040"/"D4041", "S0xxx") -- TODO confirm against the
  Olympus makernote documentation.
*/
  int isInterchangeable =
      (id == 0x4434303430) ||
      (id == 0x4434303431) ||
      ((id >= 0x5330303030) && (id <= 0x5330303939));
  int isClassicFourThirds =
      (id == 0x4434303430) ||
      (id == 0x4434303431) ||
      ((id >= 0x5330303033) && (id <= 0x5330303138)) ||
      (id == 0x5330303233) ||
      (id == 0x5330303239) ||
      (id == 0x5330303330) ||
      (id == 0x5330303333);

  imgdata.lens.makernotes.CamID = id;

  if (isInterchangeable)
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FT;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT;
  }
  else
  {
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
  }

  // Refine the mount: classic Four Thirds bodies keep LIBRAW_MOUNT_FT,
  // every other interchangeable-lens body becomes Micro Four Thirds.
  if (isClassicFourThirds)
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT;
  else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_mFT;
  return;
}
void CLASS setPentaxBodyFeatures (unsigned id)
{
/*
  Records the Pentax body ID and sets lens/camera mount and format.
  Unknown IDs are treated as fixed-lens cameras.
*/
  // K-mount APS-C DSLR body IDs
  static const unsigned kMountIDs[] = {
    0x12994, 0x12aa2, 0x12b1a, 0x12b60, 0x12b7e, 0x12b80, 0x12b9c,
    0x12b9d, 0x12ba2, 0x12c1e, 0x12c20, 0x12cd2, 0x12cd4, 0x12cfa,
    0x12d72, 0x12d73, 0x12db8, 0x12dfe, 0x12e6c, 0x12e76, 0x12ef8,
    0x12f52, 0x12f70, 0x12f71, 0x12fb6, 0x12fc0, 0x12fca, 0x1301a
  };
  // 645-mount medium-format body IDs
  static const unsigned mount645IDs[] = { 0x12e08, 0x13010 };
  // Q-mount body IDs
  static const unsigned qMountIDs[] = { 0x12ee4, 0x12f66, 0x12f7a, 0x1302e };
  unsigned n;

  imgdata.lens.makernotes.CamID = id;

  for (n = 0; n < sizeof kMountIDs / sizeof kMountIDs[0]; n++)
    if (id == kMountIDs[n])
    {
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
      imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
      return;
    }
  for (n = 0; n < sizeof mount645IDs / sizeof mount645IDs[0]; n++)
    if (id == mount645IDs[n])
    {
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645;
      imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF;
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645;
      imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF;
      return;
    }
  for (n = 0; n < sizeof qMountIDs / sizeof qMountIDs[0]; n++)
    if (id == qMountIDs[n])
    {
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q;
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q;
      return;
    }
  // not recognized: assume a fixed-lens camera
  imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
  imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
  return;
}
void CLASS setPhaseOneFeatures (unsigned id) {
/*
  Records the Phase One / Leaf back ID and, when no body name is known
  yet, fills imgdata.lens.makernotes.body from the lookup table below
  (back ID -> camera body the back was mounted on).
*/
  ushort i;
  static const struct {
    ushort id;
    char t_model[32];
  } p1_unique[] = {
    // Phase One section:
    {1, "Hasselblad V"},
    {10, "PhaseOne/Mamiya"},
    {12, "Contax 645"},
    {16, "Hasselblad V"},
    {17, "Hasselblad V"},
    {18, "Contax 645"},
    {19, "PhaseOne/Mamiya"},
    {20, "Hasselblad V"},
    {21, "Contax 645"},
    {22, "PhaseOne/Mamiya"},
    {23, "Hasselblad V"},
    {24, "Hasselblad H"},
    {25, "PhaseOne/Mamiya"},
    {32, "Contax 645"},
    {34, "Hasselblad V"},
    {35, "Hasselblad V"},
    {36, "Hasselblad H"},
    {37, "Contax 645"},
    {38, "PhaseOne/Mamiya"},
    {39, "Hasselblad V"},
    {40, "Hasselblad H"},
    {41, "Contax 645"},
    {42, "PhaseOne/Mamiya"},
    {44, "Hasselblad V"},
    {45, "Hasselblad H"},
    {46, "Contax 645"},
    {47, "PhaseOne/Mamiya"},
    {48, "Hasselblad V"},
    {49, "Hasselblad H"},
    {50, "Contax 645"},
    {51, "PhaseOne/Mamiya"},
    {52, "Hasselblad V"},
    {53, "Hasselblad H"},
    {54, "Contax 645"},
    {55, "PhaseOne/Mamiya"},
    {67, "Hasselblad V"},
    {68, "Hasselblad H"},
    {69, "Contax 645"},
    {70, "PhaseOne/Mamiya"},
    {71, "Hasselblad V"},
    {72, "Hasselblad H"},
    {73, "Contax 645"},
    {74, "PhaseOne/Mamiya"},
    {76, "Hasselblad V"},
    {77, "Hasselblad H"},
    {78, "Contax 645"},
    {79, "PhaseOne/Mamiya"},
    {80, "Hasselblad V"},
    {81, "Hasselblad H"},
    {82, "Contax 645"},
    {83, "PhaseOne/Mamiya"},
    {84, "Hasselblad V"},
    {85, "Hasselblad H"},
    {86, "Contax 645"},
    {87, "PhaseOne/Mamiya"},
    {99, "Hasselblad V"},
    {100, "Hasselblad H"},
    {101, "Contax 645"},
    {102, "PhaseOne/Mamiya"},
    {103, "Hasselblad V"},
    {104, "Hasselblad H"},
    {105, "PhaseOne/Mamiya"},
    {106, "Contax 645"},
    {112, "Hasselblad V"},
    {113, "Hasselblad H"},
    {114, "Contax 645"},
    {115, "PhaseOne/Mamiya"},
    {131, "Hasselblad V"},
    {132, "Hasselblad H"},
    {133, "Contax 645"},
    {134, "PhaseOne/Mamiya"},
    {135, "Hasselblad V"},
    {136, "Hasselblad H"},
    {137, "Contax 645"},
    {138, "PhaseOne/Mamiya"},
    {140, "Hasselblad V"},
    {141, "Hasselblad H"},
    {142, "Contax 645"},
    {143, "PhaseOne/Mamiya"},
    {148, "Hasselblad V"},
    {149, "Hasselblad H"},
    {150, "Contax 645"},
    {151, "PhaseOne/Mamiya"},
    {160, "A-250"},
    {161, "A-260"},
    {162, "A-280"},
    {167, "Hasselblad V"},
    {168, "Hasselblad H"},
    {169, "Contax 645"},
    {170, "PhaseOne/Mamiya"},
    {172, "Hasselblad V"},
    {173, "Hasselblad H"},
    {174, "Contax 645"},
    {175, "PhaseOne/Mamiya"},
    {176, "Hasselblad V"},
    {177, "Hasselblad H"},
    {178, "Contax 645"},
    {179, "PhaseOne/Mamiya"},
    {180, "Hasselblad V"},
    {181, "Hasselblad H"},
    {182, "Contax 645"},
    {183, "PhaseOne/Mamiya"},
    {208, "Hasselblad V"},
    {211, "PhaseOne/Mamiya"},
    {448, "Phase One 645AF"},
    {457, "Phase One 645DF"},
    {471, "Phase One 645DF+"},
    {704, "Phase One iXA"},
    {705, "Phase One iXA - R"},
    {706, "Phase One iXU 150"},
    {707, "Phase One iXU 150 - NIR"},
    {708, "Phase One iXU 180"},
    {721, "Phase One iXR"},
    // Leaf section:
    {333,"Mamiya"},
    {329,"Universal"},
    {330,"Hasselblad H1/H2"},
    {332,"Contax"},
    {336,"AFi"},
    {327,"Mamiya"},
    {324,"Universal"},
    {325,"Hasselblad H1/H2"},
    {326,"Contax"},
    {335,"AFi"},
    {340,"Mamiya"},
    {337,"Universal"},
    {338,"Hasselblad H1/H2"},
    {339,"Contax"},
    {323,"Mamiya"},
    {320,"Universal"},
    {322,"Hasselblad H1/H2"},
    {321,"Contax"},
    {334,"AFi"},
    {369,"Universal"},
    {370,"Mamiya"},
    {371,"Hasselblad H1/H2"},
    {372,"Contax"},
    {373,"Afi"},
  };
  imgdata.lens.makernotes.CamID = id;
  if (id && !imgdata.lens.makernotes.body[0]) {
    for (i=0; i < sizeof p1_unique / sizeof *p1_unique; i++)
      if (id == p1_unique[i].id) {
        strcpy(imgdata.lens.makernotes.body,p1_unique[i].t_model);
        break; // IDs are unique: stop scanning once found
      }
  }
  return;
}
void CLASS setSonyBodyFeatures (unsigned id) {
/*
  Records the Sony ModelID and derives sensor format and camera mount.
  The ID sets are identical to the previous OR-chain version; they are
  just expressed as switch fallthrough groups.
*/
  imgdata.lens.makernotes.CamID = id;

  // Sensor format: full-frame bodies get FF; the listed fixed-lens
  // compacts are left untouched; everything else defaults to APS-C.
  switch (id) {
  case 257: // a900
  case 269: // a850
  case 340: // ILCE-7M2
  case 318: // ILCE-7S
  case 311: // ILCE-7R
  case 306: // ILCE-7
  case 298: // DSC-RX1
  case 299: // NEX-VG900
  case 310: // DSC-RX1R
  case 294: // SLT-99, Hasselblad HV
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
    break;
  case 2:   // DSC-R1
  case 297: // DSC-RX100
  case 308: // DSC-RX100M2
  case 309: // DSC-RX10
  case 317: // DSC-RX100M3
    break;  // format deliberately not set for these models
  default:
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    break;
  }

  // Camera mount (and, for fixed-lens models, lens mount too).
  switch (id) {
  // E-mount, ILCE:
  case 302: case 306: case 311: case 312: case 313: case 318:
  case 339: case 340: case 346:
  // E-mount, NEX:
  case 278: case 279: case 284: case 288: case 289: case 290:
  case 293: case 295: case 296: case 299: case 300: case 305:
  case 307:
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Sony_E;
    break;
  // A-mount, DSLR:
  case 256: case 257: case 258: case 259: case 260: case 261:
  case 262: case 263: case 264: case 265: case 266: case 269:
  case 270: case 273: case 274: case 275: case 282: case 283:
  // A-mount, SLT:
  case 280: case 281: case 285: case 286: case 287: case 291:
  case 292: case 294: case 303:
  // A-mount, ILCA:
  case 319:
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
    break;
  // fixed-lens DSC models:
  case 2:   // DSC-R1
  case 297: // DSC-RX100
  case 298: // DSC-RX1
  case 308: // DSC-RX100M2
  case 309: // DSC-RX10
  case 310: // DSC-RX1R
  case 317: // DSC-RX100M3
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
    break;
  }
  return;
}
void CLASS parseSonyLensType2 (uchar a, uchar b) {
/*
  Interprets the big-endian 16-bit "LensType2" code (a = high byte,
  b = low byte). Codes below 0x100 identify a mount adapter; larger
  codes are stored directly as the lens ID. Zero means "no data".
*/
  ushort code;
  code = (((ushort)a)<<8) | ((ushort)b);
  if (!code)
    return;
  if (code >= 0x100)
  {
    imgdata.lens.makernotes.LensID = code;
    return;
  }
  imgdata.lens.makernotes.AdapterID = code;
  if ((code == 1) || (code == 2) || (code == 3) || (code == 6))
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
  else if ((code == 44) || (code == 78) || (code == 239))
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
  return;
}
void CLASS parseSonyLensFeatures (uchar a, uchar b) {
/*
  Decodes the two Sony "lens features" bytes into the
  LensFeatures_pre ("E"/"FE"/"DT", plus " PZ") and LensFeatures_suf
  (" G", " ZA", " SSM", ...) strings, and sets lens format/mount.
  Does nothing for adapted Canon EF lenses or when both bytes are 0.
*/
  ushort features;
  features = (((ushort)a)<<8) | ((ushort)b);
  if ((imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Canon_EF) || !features)
    return;
/* BUGFIX: strncat()'s third argument is the number of bytes that may
   still be APPENDED (excluding the trailing NUL), not the total buffer
   size. Passing sizeof(buf), as the old code did, can write one byte
   past the end of a nearly-full buffer. */
#define APPEND_FEATURE(buf, s) \
  strncat((buf), (s), sizeof(buf) - strlen(buf) - 1)
  imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FF;
  imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
  imgdata.lens.makernotes.LensFeatures_pre[0] = 0;
  imgdata.lens.makernotes.LensFeatures_suf[0] = 0;
  if ((features & 0x0200) && (features & 0x0100)) {
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "E");
    imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
  } else if (features & 0x0200) {
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "FE");
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
  } else if (features & 0x0100) {
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "DT");
    imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
  }
  if (features & 0x4000)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_pre, " PZ");
  if (features & 0x0008)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " G");
  else if (features & 0x0004)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " ZA");
  if ((features & 0x0020) && (features & 0x0040))
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " Macro");
  else if (features & 0x0020)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " STF");
  else if (features & 0x0040)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " Reflex");
  else if (features & 0x0080)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " Fisheye");
  if (features & 0x0001)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " SSM");
  else if (features & 0x0002)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " SAM");
  if (features & 0x8000)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " OSS");
  if (features & 0x2000)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " LE");
  if (features & 0x0800)
    APPEND_FEATURE(imgdata.lens.makernotes.LensFeatures_suf, " II");
#undef APPEND_FEATURE
  // strip the leading space introduced by the first appended suffix
  if (imgdata.lens.makernotes.LensFeatures_suf[0] == ' ')
    memmove(imgdata.lens.makernotes.LensFeatures_suf, imgdata.lens.makernotes.LensFeatures_suf+1, strlen(imgdata.lens.makernotes.LensFeatures_suf));
  return;
}
void CLASS process_Sony_0x940c (uchar * buf)
{
/*
  Parses Sony makernote tag 0x940c. Bytes read from `buf` are first
  remapped through the SonySubstitution[] table. Byte 0x08 selects the
  lens mount (unless a Canon EF lens was already detected); bytes
  0x09/0x0a carry the big-endian LensType2 code.
*/
  ushort typeCode;
  if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
  {
    uchar mountByte = SonySubstitution[buf[0x0008]];
    if ((mountByte == 1) || (mountByte == 5))
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
    else if (mountByte == 4)
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
  }
  typeCode = (((ushort)SonySubstitution[buf[0x000a]])<<8) |
             ((ushort)SonySubstitution[buf[0x0009]]);
  if ((typeCode > 0) && (typeCode < 32784))
    parseSonyLensType2 (SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids
                        SonySubstitution[buf[0x0009]]);
  return;
}
void CLASS process_Sony_0x9050 (uchar * buf, unsigned id)
{
/*
  Parses Sony makernote tag 0x9050. Every byte read from `buf` is first
  remapped through the SonySubstitution[] table. `id` is the Sony
  ModelID (tag 0xb001), used to pick the per-model offset of the lens
  feature bytes near the end of the record.
*/
  ushort lid;
  if ((imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_Sony_E) &&
      (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens))
  {
    /* lens max/min aperture, log-encoded; rounded to one decimal */
    if (buf[0])
      imgdata.lens.makernotes.MaxAp =
        my_roundf(powf64(2.0f, ((float)SonySubstitution[buf[0]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
    if (buf[1])
      imgdata.lens.makernotes.MinAp =
        my_roundf(powf64(2.0f, ((float)SonySubstitution[buf[1]] / 8.0 - 1.06f) / 2.0f)*10.0f) / 10.0f;
  }
  if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
  {
    /* current aperture: 16-bit value at 0x3c (low) / 0x3d (high) */
    if (buf[0x3d] | buf[0x3c])
    {
      lid = SonySubstitution[buf[0x3d]] << 8 |
            SonySubstitution[buf[0x3c]];
      imgdata.lens.makernotes.CurAp =
        powf64(2.0f, ((float)lid/256.0f - 16.0f) / 2.0f);
    }
    /* mount byte at 0x105 must not override an adapted Canon EF lens */
    if (buf[0x105] && (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF))
      imgdata.lens.makernotes.LensMount =
        SonySubstitution[buf[0x105]];
    if (buf[0x106])
      imgdata.lens.makernotes.LensFormat =
        SonySubstitution[buf[0x106]];
  }
  if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
  {
    parseSonyLensType2 (SonySubstitution[buf[0x0108]], // LensType2 - Sony lens ids
                        SonySubstitution[buf[0x0107]]);
  }
  /* Minolta/Sony A-mount lens id at 0x0109/0x010a, only if not set yet */
  if ((imgdata.lens.makernotes.LensID == -1) &&
      (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) &&
      (buf[0x010a] | buf[0x0109]))
  {
    imgdata.lens.makernotes.LensID = // LensType - Minolta/Sony lens ids
      SonySubstitution[buf[0x010a]] << 8 |
      SonySubstitution[buf[0x0109]];
    /* ids in (61184, 65535) are rebased and flagged as Canon EF lenses
       (smart adapter) */
    if ((imgdata.lens.makernotes.LensID > 61184) &&
        (imgdata.lens.makernotes.LensID < 65535))
    {
      imgdata.lens.makernotes.LensID -= 61184;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
    }
  }
  /* lens-feature bytes sit two positions earlier on these models */
  if ((id >= 286) && (id <= 293))
    // "SLT-A65", "SLT-A77", "NEX-7", "NEX-VG20E",
    // "SLT-A37", "SLT-A57", "NEX-F3", "Lunar"
    parseSonyLensFeatures (SonySubstitution[buf[0x115]],
                           SonySubstitution[buf[0x116]]);
  else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
    parseSonyLensFeatures (SonySubstitution[buf[0x116]],
                           SonySubstitution[buf[0x117]]);
  return;
}
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
unsigned offset = 0, entries, tag, type, len, save, c;
unsigned i;
uchar NikonKey, ci, cj, ck;
unsigned serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
short morder, sorder = order;
char buf[10];
fread(buf, 1, 10, ifp);
if (!strcmp(buf, "Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek(ifp, offset - 8, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMPUS") ||
!strcmp(buf, "PENTAX ") ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG))) {
base = ftell(ifp) - 10;
fseek(ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
}
else if (!strncmp(buf, "SONY", 4) ||
!strcmp(buf, "Panasonic")) {
goto nf;
}
else if (!strncmp(buf, "FUJIFILM", 8)) {
base = ftell(ifp) - 10;
nf: order = 0x4949;
fseek(ifp, 2, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMP") ||
!strcmp(buf, "LEICA") ||
!strcmp(buf, "Ricoh") ||
!strcmp(buf, "EPSON"))
fseek(ifp, -2, SEEK_CUR);
else if (!strcmp(buf, "AOC") ||
!strcmp(buf, "QVC"))
fseek(ifp, -4, SEEK_CUR);
else {
fseek(ifp, -10, SEEK_CUR);
if ((!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG)))
base = ftell(ifp);
}
entries = get2();
// if (dng_writer == AdobeDNG)
// printf("\n*** parse_makernote_0xc634: AdobeDNG");
// else if (dng_writer == CameraDNG)
// printf("\n*** parse_makernote_0xc634: CameraDNG");
// printf ("\n\tbuf =%s=\n\tmake =%s=\n\tmodel =%s=\n\tbase: 0x%x\n\tentries: %d\n",
// buf, make, model, base, entries);
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get(base, &tag, &type, &len, &save);
tag |= uptag << 16;
// printf ("\n\tbase: 0x%x tag: 0x%04x type: 0x%x len: 0x%x pos: 0x%llx",
// base, tag, type, len, ftell(ifp));
if (!strcmp(make, "Canon"))
{
if (tag == 0x0001) // camera settings
{
fseek(ifp, 44, SEEK_CUR);
imgdata.lens.makernotes.LensID = get2();
imgdata.lens.makernotes.MaxFocal = get2();
imgdata.lens.makernotes.MinFocal = get2();
imgdata.lens.makernotes.CanonFocalUnits = get2();
if (imgdata.lens.makernotes.CanonFocalUnits != 1)
{
imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
}
else if (tag == 0x0002) // focal length
{
imgdata.lens.makernotes.FocalType = get2();
imgdata.lens.makernotes.CurFocal = get2();
if ((imgdata.lens.makernotes.CanonFocalUnits != 1) &&
imgdata.lens.makernotes.CanonFocalUnits)
{
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
}
else if (tag == 0x0004) // shot info
{
fseek(ifp, 42, SEEK_CUR);
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
}
else if (tag == 0x000d) // camera info
{
CanonCameraInfo = (uchar*)malloc(len);
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo) processCanonCameraInfo(unique_id, CanonCameraInfo);
}
else if (tag == 0x0095 && // lens model tag
!imgdata.lens.makernotes.Lens[0])
{
fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens
fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
else
{
char efs[2];
imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
fread(efs, 2, 1, ifp);
if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
{ // "EF-S, TS-E, MP-E, EF-M" lenses
imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
imgdata.lens.makernotes.Lens[4] = 32;
if (efs[1] == 83)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
else if (efs[1] == 77)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
}
}
else
{ // "EF" lenses
imgdata.lens.makernotes.Lens[2] = 32;
imgdata.lens.makernotes.Lens[3] = efs[0];
imgdata.lens.makernotes.Lens[4] = efs[1];
}
fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
}
}
}
else if (!strncmp(make, "FUJI", 4))
switch (tag) {
case 0x1404: imgdata.lens.makernotes.MinFocal = getreal(type); break;
case 0x1405: imgdata.lens.makernotes.MaxFocal = getreal(type); break;
case 0x1406: imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); break;
case 0x1407: imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); break;
}
else if (!strncasecmp(make, "LEICA", 5))
{
if ((tag == 0x0303) && (type != 4))
{
fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON", 5))
{
if (tag == 0x1d) // serial number
while ((c = fgetc(ifp)) && c != EOF)
serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
else if (tag == 0x0082) // lens attachment
{
fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
if (!(imgdata.lens.nikon.NikonLensType & 0x01))
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
if (imgdata.lens.nikon.NikonLensType & 0x02)
{
if (imgdata.lens.nikon.NikonLensType & 0x04)
imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
else
imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
}
if (imgdata.lens.nikon.NikonLensType & 0x08)
{
imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
}
if (imgdata.lens.nikon.NikonLensType & 0x10)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_CX;
}
if (imgdata.lens.nikon.NikonLensType & 0x20)
{
strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
}
imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 204: lenNikonLensData = 16; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
}
}
else if (tag == 0xa7) // shutter count
{
NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
}
}
else if (tag == 37 && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc, 1, 1, ifp);
iso_speed = (int)(100.0 * powf64(2.0, (double)(cc) / 12.0 - 5.0));
break;
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
if (tag == 0x2010)
{
fseek(ifp, save - 4, SEEK_SET);
fseek(ifp, base + get4(), SEEK_SET);
parse_makernote_0xc634(base, 0x2010, dng_writer);
}
switch (tag) {
case 0x0207:
case 0x20100100:
{
uchar sOlyID[7];
long unsigned OlyID;
fread (sOlyID, len, 1, ifp);
OlyID = sOlyID[0];
i = 1;
while (sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = powf64(2.0f, getreal(type)/2);
break;
case 0x20100201:
imgdata.lens.makernotes.LensID =
(unsigned long long)fgetc(ifp)<<16 |
(unsigned long long)(fgetc(ifp), fgetc(ifp))<<8 |
(unsigned long long)fgetc(ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100203:
fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
fread(imgdata.lens.makernotes.Teleconverter, len, 1, ifp);
break;
case 0x20100403:
fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
break;
}
}
else if (!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
if (
(dng_writer == CameraDNG) &&
(
(unique_id == 0x12f66) || // Q10
(unique_id == 0x12f7a) || // Q7
(unique_id == 0x12ee4) // Q
)
)
base += 10;
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x0207)
{
ushort iLensData = 0;
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID < 0x12b9c) ||
((imgdata.lens.makernotes.CamID == 0x12b9c) || // K100D
(imgdata.lens.makernotes.CamID == 0x12b9d) || // K110D
(imgdata.lens.makernotes.CamID == 0x12ba2) && // K100D Super
(!table_buf[20] || (table_buf[20] == 0xff))))
{
iLensData = 3;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
(((unsigned)table_buf[0]) << 8) + table_buf[1];
}
else switch (len)
{
case 90: // LensInfo3
iLensData = 13;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 91: // LensInfo4
iLensData = 12;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 80: // LensInfo5
case 128:
iLensData = 15;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5];
break;
default:
if (imgdata.lens.makernotes.CamID >= 0x12b9c) // LensInfo2
{
iLensData = 4;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3];
}
}
if (iLensData)
{
if (table_buf[iLensData+9] &&
(fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
imgdata.lens.makernotes.CurFocal =
10*(table_buf[iLensData+9]>>2) * powf64(4, (table_buf[iLensData+9] & 0x03)-2);
if (table_buf[iLensData+10] & 0xf0)
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f);
if (table_buf[iLensData+10] & 0x0f)
imgdata.lens.makernotes.MinAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f);
if (
(imgdata.lens.makernotes.CamID != 0x12e6c) && // K-r
(imgdata.lens.makernotes.CamID != 0x12e76) && // K-5
(imgdata.lens.makernotes.CamID != 0x12f70) // K-5 II
// (imgdata.lens.makernotes.CamID != 0x12f71) // K-5 II s
)
{
switch (table_buf[iLensData] & 0x06)
{
case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break;
case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break;
case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break;
case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break;
}
if (table_buf[iLensData] & 0x70)
imgdata.lens.makernotes.LensFStops =
((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
if ((table_buf[iLensData+14] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f);
}
else if ((imgdata.lens.makernotes.CamID != 0x12e76) && // K-5
(table_buf[iLensData+15] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
{
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f);
}
}
free(table_buf);
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 2, SEEK_CUR);
fread(imgdata.lens.makernotes.Lens, 30, 1, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
fread(LensInfo, 20, 1, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7) &&
(dng_writer == AdobeDNG))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
imgdata.lens.makernotes.CamID = unique_id = get4();
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len) {
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (saneSonyCameraInfo(table_buf[0], table_buf[3], table_buf[2], table_buf[5], table_buf[4], table_buf[7]))
{
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
}
break;
default:
// CameraInfo2 & 3
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
}
free(table_buf);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 61184) &&
(imgdata.lens.makernotes.LensID < 65535))
{
imgdata.lens.makernotes.LensID -= 61184;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6]))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
free(table_buf);
}
}
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
#else
/* Stub compiled when the full makernote parser above is disabled
   (see the surrounding #ifdef/#else): makernotes referenced via DNG
   tag 0xc634 are simply ignored in this build configuration.
   Parameters mirror the real implementation and are intentionally unused. */
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
/*placeholder */
}
#endif
void CLASS parse_makernote (int base, int uptag)
{
unsigned offset=0, entries, tag, type, len, save, c;
unsigned ver97=0, serial=0, i, wbi=0, wb[4]={0,0,0,0};
uchar buf97[324], ci, cj, ck;
short morder, sorder=order;
char buf[10];
unsigned SamsungKey[11];
static const double rgb_adobe[3][3] = // inv(sRGB2XYZ_D65) * AdobeRGB2XYZ_D65
{{ 1.398283396477404, -0.398283116703571, 4.427165001263944E-08},
{-1.233904514232401E-07, 0.999999995196570, 3.126724276714121e-08},
{ 4.561487232726535E-08, -0.042938290466635, 1.042938250416105 }};
float adobe_cam [3][3];
uchar NikonKey;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_present = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_present = 0;
#endif
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strcmp(make,"Nokia")) return;
fread (buf, 1, 10, ifp);
if (!strncmp (buf,"KDK" ,3) || /* these aren't TIFF tables */
!strncmp (buf,"VER" ,3) ||
!strncmp (buf,"IIII",4) ||
!strncmp (buf,"MMMM",4)) return;
if (!strncmp (buf,"KC" ,2) || /* Konica KD-400Z, KD-510Z */
!strncmp (buf,"MLY" ,3)) { /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i=ftell(ifp)) < data_offset && i < 16384) {
wb[0] = wb[2]; wb[2] = wb[1]; wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 &&
wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp (buf,"Nikon")) {
base = ftell(ifp);
order = get2();
if (get2() != 42) goto quit;
offset = get4();
fseek (ifp, offset-8, SEEK_CUR);
} else if (!strcmp (buf,"OLYMPUS") ||
!strcmp (buf,"PENTAX ")) {
base = ftell(ifp)-10;
fseek (ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O') get2();
} else if (!strncmp (buf,"SONY",4) ||
!strcmp (buf,"Panasonic")) {
goto nf;
} else if (!strncmp (buf,"FUJIFILM",8)) {
base = ftell(ifp)-10;
nf: order = 0x4949;
fseek (ifp, 2, SEEK_CUR);
} else if (!strcmp (buf,"OLYMP") ||
!strcmp (buf,"LEICA") ||
!strcmp (buf,"Ricoh") ||
!strcmp (buf,"EPSON"))
fseek (ifp, -2, SEEK_CUR);
else if (!strcmp (buf,"AOC") ||
!strcmp (buf,"QVC"))
fseek (ifp, -4, SEEK_CUR);
else {
fseek (ifp, -10, SEEK_CUR);
if (!strncmp(make,"SAMSUNG",7))
base = ftell(ifp);
}
// adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400
if (!strncasecmp(make, "LEICA", 5))
{
if (!strncmp(model, "M8", 2) ||
!strncasecmp(model, "Leica M8", 8) ||
!strncasecmp(model, "LEICA X", 7))
{
base = ftell(ifp)-8;
}
else if (!strncasecmp(model, "LEICA M (Typ 240)", 17))
{
base = 0;
}
else if (!strncmp(model, "M9", 2) ||
!strncasecmp(model, "Leica M9", 8) ||
!strncasecmp(model, "M Monochrom", 11) ||
!strncasecmp(model, "Leica M Monochrom", 11))
{
if (!uptag)
{
base = ftell(ifp) - 10;
fseek (ifp, 8, SEEK_CUR);
}
else if (uptag == 0x3400)
{
fseek (ifp, 10, SEEK_CUR);
base += 10;
}
}
else if (!strncasecmp(model, "LEICA T", 7))
{
base = ftell(ifp)-8;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T;
#endif
}
}
entries = get2();
// printf("\n*** parse_makernote\n\tmake =%s=\n\tmodel =%s= \n\tentries: %d\n\tpos: 0x%llx\n",
// make, model, entries, ftell(ifp));
if (entries > 1000) return;
morder = order;
while (entries--) {
order = morder;
tiff_get (base, &tag, &type, &len, &save);
tag |= uptag << 16;
// printf ("\n\tbase: 0x%x tag: 0x%04x type: 0x%x len: 0x%x pos: 0x%llx",
// base, tag, type, len, ftell(ifp));
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp(make, "Canon"))
{
if (tag == 0x0001) // camera settings
{
fseek(ifp, 44, SEEK_CUR);
imgdata.lens.makernotes.LensID = get2();
imgdata.lens.makernotes.MaxFocal = get2();
imgdata.lens.makernotes.MinFocal = get2();
imgdata.lens.makernotes.CanonFocalUnits = get2();
if (imgdata.lens.makernotes.CanonFocalUnits != 1)
{
imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
}
else if (tag == 0x0002) // focal length
{
imgdata.lens.makernotes.FocalType = get2();
imgdata.lens.makernotes.CurFocal = get2();
if ((imgdata.lens.makernotes.CanonFocalUnits != 1) &&
imgdata.lens.makernotes.CanonFocalUnits)
{
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
}
else if (tag == 0x0004) // shot info
{
fseek(ifp, 42, SEEK_CUR);
imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
}
else if (tag == 0x000d) // camera info
{
CanonCameraInfo = (uchar*)malloc(len);
fread(CanonCameraInfo, len, 1, ifp);
lenCanonCameraInfo = len;
}
else if (tag == 0x0095 && // lens model tag
!imgdata.lens.makernotes.Lens[0])
{
fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens
fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
else
{
char efs[2];
imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
fread(efs, 2, 1, ifp);
if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
{ // "EF-S, TS-E, MP-E, EF-M" lenses
imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
imgdata.lens.makernotes.Lens[4] = 32;
if (efs[1] == 83)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
}
else if (efs[1] == 77)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
}
}
else
{ // "EF" lenses
imgdata.lens.makernotes.Lens[2] = 32;
imgdata.lens.makernotes.Lens[3] = efs[0];
imgdata.lens.makernotes.Lens[4] = efs[1];
}
fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
}
}
}
else if (!strncmp(make, "FUJI", 4))
switch (tag) {
case 0x1404: imgdata.lens.makernotes.MinFocal = getreal(type); break;
case 0x1405: imgdata.lens.makernotes.MaxFocal = getreal(type); break;
case 0x1406: imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type); break;
case 0x1407: imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type); break;
}
else if (!strncasecmp(make, "LEICA", 5))
{
if ((tag == 0x0303) && (type != 4))
{
fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
}
if ((tag == 0x3405) ||
(tag == 0x0310) ||
(tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID>>2)<<8) |
(imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') ||
!strncasecmp (model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') ||
!strncasecmp (model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (
((tag == 0x0313) || (tag == 0x34003406)) &&
(fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5))
)
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote (base, 0x3400);
}
}
else if (!strncmp(make, "NIKON",5))
{
if (tag == 0x0082) // lens attachment
{
fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
if (!(imgdata.lens.nikon.NikonLensType & 0x01))
{
imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
}
if (imgdata.lens.nikon.NikonLensType & 0x02)
{
if (imgdata.lens.nikon.NikonLensType & 0x04)
imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
else
imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
}
if (imgdata.lens.nikon.NikonLensType & 0x08)
{
imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
}
if (imgdata.lens.nikon.NikonLensType & 0x10)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_CX;
}
if (imgdata.lens.nikon.NikonLensType & 0x20)
{
strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
}
imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a*b*(12/c);
imgdata.lens.makernotes.LensFStops =
(float)imgdata.lens.nikon.NikonLensFStops /12.0f;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100: lenNikonLensData = 9; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203: lenNikonLensData = 15; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 204: lenNikonLensData = 16; imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F; break;
case 400: lenNikonLensData = 459; break;
case 401: lenNikonLensData = 590; break;
case 402: lenNikonLensData = 509; break;
case 403: lenNikonLensData = 879; break;
}
table_buf = (uchar*)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
}
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
switch (tag) {
case 0x0207:
case 0x20100100:
{
uchar sOlyID[7];
long unsigned OlyID;
fread (sOlyID, len, 1, ifp);
OlyID = sOlyID[0];
i = 1;
while (sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = powf64(2.0f, getreal(type)/2);
break;
case 0x20100201:
imgdata.lens.makernotes.LensID =
(unsigned long long)fgetc(ifp)<<16 |
(unsigned long long)(fgetc(ifp), fgetc(ifp))<<8 |
(unsigned long long)fgetc(ifp);
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) ||
(imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100203:
fread(imgdata.lens.makernotes.Lens, len, 1, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = powf64(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID =
imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
fread(imgdata.lens.makernotes.Teleconverter, len, 1, ifp);
break;
case 0x20100403:
fread(imgdata.lens.makernotes.Attachment, len, 1, ifp);
break;
}
}
else if (!strncmp(make, "PENTAX", 6) &&
!strncmp(model, "GR", 2))
{
if ((tag == 0x1001) && (type == 3))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
imgdata.lens.makernotes.FocalType = 1;
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
}
else if (!strncmp(make, "RICOH", 5) &&
strncmp(model, "PENTAX", 6))
{
if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
else if (tag == 0x2001)
{
short ntags, cur_tag;
fseek(ifp, 20, SEEK_CUR);
ntags = get2();
cur_tag = get2();
while (cur_tag != 0x002c)
{
fseek(ifp, 10, SEEK_CUR);
cur_tag = get2();
}
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, get4()+34, SEEK_SET);
imgdata.lens.makernotes.LensID = getc(ifp) - '0';
switch(imgdata.lens.makernotes.LensID)
{
case 1:
case 2:
case 3:
case 5:
case 6:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule;
break;
case 8:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
break;
default:
imgdata.lens.makernotes.LensID = -1;
}
}
}
else if (!strncmp(make, "PENTAX", 6) ||
!strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && dng_version) &&
strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2()/10.0f;
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4()/100.0f;
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x0207)
{
ushort iLensData = 0;
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID < 0x12b9c) ||
((imgdata.lens.makernotes.CamID == 0x12b9c) || // K100D
(imgdata.lens.makernotes.CamID == 0x12b9d) || // K110D
(imgdata.lens.makernotes.CamID == 0x12ba2) && // K100D Super
(!table_buf[20] || (table_buf[20] == 0xff))))
{
iLensData = 3;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
(((unsigned)table_buf[0]) << 8) + table_buf[1];
}
else switch (len)
{
case 90: // LensInfo3
iLensData = 13;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 91: // LensInfo4
iLensData = 12;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) <<8) + table_buf[4];
break;
case 80: // LensInfo5
case 128:
iLensData = 15;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) <<8) + table_buf[5];
break;
default:
if (imgdata.lens.makernotes.CamID >= 0x12b9c) // LensInfo2
{
iLensData = 4;
if (imgdata.lens.makernotes.LensID == -1)
imgdata.lens.makernotes.LensID =
((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) <<8) + table_buf[3];
}
}
if (iLensData)
{
if (table_buf[iLensData+9] && (fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
imgdata.lens.makernotes.CurFocal =
10*(table_buf[iLensData+9]>>2) * powf64(4, (table_buf[iLensData+9] & 0x03)-2);
if (table_buf[iLensData+10] & 0xf0)
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+10] & 0xf0) >>4)/4.0f);
if (table_buf[iLensData+10] & 0x0f)
imgdata.lens.makernotes.MinAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+10] & 0x0f) + 10)/4.0f);
if (
(imgdata.lens.makernotes.CamID != 0x12e6c) && // K-r
(imgdata.lens.makernotes.CamID != 0x12e76) && // K-5
(imgdata.lens.makernotes.CamID != 0x12f70) // K-5 II
// (imgdata.lens.makernotes.CamID != 0x12f71) // K-5 II s
)
{
switch (table_buf[iLensData] & 0x06)
{
case 0: imgdata.lens.makernotes.MinAp4MinFocal = 22.0f; break;
case 2: imgdata.lens.makernotes.MinAp4MinFocal = 32.0f; break;
case 4: imgdata.lens.makernotes.MinAp4MinFocal = 45.0f; break;
case 6: imgdata.lens.makernotes.MinAp4MinFocal = 16.0f; break;
}
if (table_buf[iLensData] & 0x70)
imgdata.lens.makernotes.LensFStops =
((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
if ((table_buf[iLensData+14] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+14] & 0x7f) -1)/32.0f);
}
else if ((imgdata.lens.makernotes.CamID != 0x12e76) && // K-5
(table_buf[iLensData+15] > 1) &&
(fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
{
imgdata.lens.makernotes.MaxAp4CurFocal =
powf64(2.0f, (float)((table_buf[iLensData+15] & 0x7f) -1)/32.0f);
}
}
free(table_buf);
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo [20];
fseek (ifp, 2, SEEK_CUR);
fread(imgdata.lens.makernotes.Lens, 30, 1, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
fread(LensInfo, 20, 1, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7))
{
if (tag == 0x0002)
{
if(get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
unique_id = imgdata.lens.makernotes.CamID = get4();
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) ||
!strncasecmp(make, "Konica", 6) ||
!strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) ||
!strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "HV",2))))
{
ushort lid;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x9050_present)
{
process_Sony_0x9050(table_buf_0x9050, unique_id);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
if (table_buf_0x940c_present)
{
if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
{
process_Sony_0x940c(table_buf_0x940c);
}
free (table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) &&
strncasecmp(model, "NEX-5C", 6) &&
!strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360))
)
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len)
{
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
break;
default:
// CameraInfo2 & 3
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
free(table_buf);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114) // CameraSettings
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len) {
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2])<<8) |
((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp =
powf64(2.0f, ((float)lid/8.0f-1.0f)/2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153]) {
case 16: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A; break;
case 17: imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E; break;
}
}
break;
}
free(table_buf);
}
else if (tag == 0x9050) // little endian
{
table_buf_0x9050 = (uchar*)malloc(len);
table_buf_0x9050_present = 1;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, imgdata.lens.makernotes.CamID);
free (table_buf_0x9050);
table_buf_0x9050_present = 0;
}
}
else if (tag == 0x940c)
{
table_buf_0x940c = (uchar*)malloc(len);
table_buf_0x940c_present = 1;
fread(table_buf_0x940c, len, 1, ifp);
if ((imgdata.lens.makernotes.CamID) &&
(imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E))
{
process_Sony_0x940c(table_buf_0x940c);
free(table_buf_0x940c);
table_buf_0x940c_present = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 61184) &&
(imgdata.lens.makernotes.LensID < 65535))
{
imgdata.lens.makernotes.LensID -= 61184;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c) imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a) // Sony LensSpec
{
table_buf = (uchar*)malloc(len);
fread(table_buf, len, 1, ifp);
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal =
bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal =
bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
free(table_buf);
}
}
#endif
if (tag == 2 && strstr(make,"NIKON") && !iso_speed)
iso_speed = (get2(),get2());
if (tag == 37 && strstr(make,"NIKON") && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc,1,1,ifp);
iso_speed = int(100.0 * powf64(2.0f,float(cc)/12.0-5.0));
}
if (tag == 4 && len > 26 && len < 35) {
if ((i=(get4(),get2())) != 0x7fff && (!iso_speed || iso_speed == 65535))
iso_speed = 50 * powf64(2.0, i/32.0 - 4);
if ((i=(get2(),get2())) != 0x7fff && !aperture)
aperture = powf64(2.0, i/64.0);
if ((i=get2()) != 0xffff && !shutter)
shutter = powf64(2.0, (short) i/-32.0);
wbi = (get2(),get2());
shot_order = (get2(),get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make,"KONICA",6)) {
fseek (ifp, tag == 4 ? 140:160, SEEK_CUR);
switch (get2()) {
case 72: flip = 0; break;
case 76: flip = 6; break;
case 82: flip = 5; break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets (model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strcmp(make,"Canon"))
fread (artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa) {
for (c=i=2; (ushort) c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i+=4) < len-5)
if (get4() == 257 && (i=len) && (c = (get4(),fgetc(ifp))) < 3)
flip = "065"[c]-'0';
}
if (tag == 0x10 && type == 4)
{
unique_id = get4();
#ifdef LIBRAW_LIBRARY_BUILD
setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo) processCanonCameraInfo(unique_id, CanonCameraInfo);
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
if(tag == 0x20400805 && len == 2 && !strncasecmp(make,"Olympus",7))
{
imgdata.color.OlympusSensorCalibration[0]=getreal(type);
imgdata.color.OlympusSensorCalibration[1]=getreal(type);
}
if (tag == 0x4001 && len > 500 && !strcasecmp(make,"Canon"))
{
long int save1 = ftell(ifp);
switch (len)
{
case 582:
imgdata.color.canon_makernotes.CanonColorDataVer = 1; // 20D / 350D
break;
case 653:
imgdata.color.canon_makernotes.CanonColorDataVer = 2; // 1Dmk2 / 1DsMK2
break;
case 796:
imgdata.color.canon_makernotes.CanonColorDataVer = 3; // 1DmkIIN / 5D / 30D / 400D
// 1DmkIII / 1DSmkIII / 1DmkIV / 5DmkII
// 7D / 40D / 50D / 60D / 450D / 500D
// 550D / 1000D / 1100D
case 674: case 692: case 702: case 1227: case 1250:
case 1251: case 1337: case 1338: case 1346:
imgdata.color.canon_makernotes.CanonColorDataVer = 4;
imgdata.color.canon_makernotes.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x0e7<<1), SEEK_SET); // offset 231 short
int bls=0;
FORC4 bls+=get2();
imgdata.color.canon_makernotes.AverageBlackLevel = bls/4;
}
if ((imgdata.color.canon_makernotes.CanonColorDataSubVer == 4)
|| (imgdata.color.canon_makernotes.CanonColorDataSubVer == 5))
{
fseek (ifp, save1+(0x2b9<<1), SEEK_SET); // offset 697 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
}
else if ((imgdata.color.canon_makernotes.CanonColorDataSubVer == 6) ||
(imgdata.color.canon_makernotes.CanonColorDataSubVer == 7))
{
fseek (ifp, save1+(0x2d0<<1), SEEK_SET); // offset 720 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
}
else if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 9)
{
fseek (ifp, save1+(0x2d4<<1), SEEK_SET); // offset 724 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
}
break;
case 5120:
imgdata.color.canon_makernotes.CanonColorDataVer = 5; // PowerShot G10
break;
case 1273: case 1275:
imgdata.color.canon_makernotes.CanonColorDataVer = 6; // 600D / 1200D
imgdata.color.canon_makernotes.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x0fb<<1), SEEK_SET); // offset 251 short
int bls=0;
FORC4 bls+=get2();
imgdata.color.canon_makernotes.AverageBlackLevel = bls/4;
}
fseek (ifp, save1+(0x1e4<<1), SEEK_SET); // offset 484 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
break;
// 1DX / 5DmkIII / 6D / 100D / 650D / 700D / M / 7DmkII / 750D / 760D
case 1312: case 1313: case 1316: case 1506:
imgdata.color.canon_makernotes.CanonColorDataVer = 7;
imgdata.color.canon_makernotes.CanonColorDataSubVer = get2();
{
fseek (ifp, save1+(0x114<<1), SEEK_SET); // offset 276 shorts
int bls=0;
FORC4 bls+=get2();
imgdata.color.canon_makernotes.AverageBlackLevel = bls/4;
}
if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 10)
{
fseek (ifp, save1+(0x1fd<<1), SEEK_SET); // offset 509 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
} else if (imgdata.color.canon_makernotes.CanonColorDataSubVer == 11)
{
fseek (ifp, save1+(0x2dd<<1), SEEK_SET); // offset 733 shorts
imgdata.color.canon_makernotes.SpecularWhiteLevel = get2();
}
break;
}
fseek (ifp, save1, SEEK_SET);
}
#endif
if (tag == 0x11 && is_raw && !strncmp(make,"NIKON",5)) {
fseek (ifp, get4()+base, SEEK_SET);
parse_tiff_ifd (base);
}
if (tag == 0x14 && type == 7) {
if (len == 2560) {
fseek (ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread (buf, 1, 10, ifp);
if (!strncmp(buf,"NRW ",4)) {
fseek (ifp, strcmp(buf+4,"0100") ? 46:1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread (model, 64, 1, ifp);
if (strstr(make,"PENTAX")) {
if (tag == 0x1b) tag = 0x1018;
if (tag == 0x1c) tag = 0x1017;
}
if (tag == 0x1d)
while ((c = fgetc(ifp)) && c != EOF)
serial = serial*10 + (isdigit(c) ? c - '0' : c % 10);
if (tag == 0x29 && type == 1) { // Canon PowerShot G9
c = wbi < 18 ? "012347800000005896"[wbi]-'0' : 0;
fseek (ifp, 8 + c*32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
#ifndef LIBRAW_LIBRARY_BUILD
// works for some files, but not all
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14-tiff_ifd[2].bps);
#endif
if (tag == 0x81 && type == 4) {
data_offset = get4();
fseek (ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) ||
(tag == 0x100 && type == 7) ||
(tag == 0x280 && type == 1)) {
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97) {
for (i=0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp)-'0';
switch (ver97) {
case 100:
fseek (ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek (ifp, 6, SEEK_CUR);
goto get2_rggb;
case 103:
fseek (ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200) {
if (ver97 != 205) fseek (ifp, 280, SEEK_CUR);
fread (buf97, 324, 1, ifp);
}
}
if (tag == 0xa1 && type == 7) {
order = 0x4949;
fseek (ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3) {
fseek (ifp, wbi*48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7) { // shutter count
NikonKey = fgetc(ifp)^fgetc(ifp)^fgetc(ifp)^fgetc(ifp);
if ( (unsigned) (ver97-200) < 17) {
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i=0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97-200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] =
sget2 (buf97 + (i & -2) + c*2);
}
#ifdef LIBRAW_LIBRARY_BUILD
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
}
#endif
}
if(tag == 0xb001 && type == 3) // Sony ModelID
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(),get4());
if (tag == 0x200 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4)
goto get2_rggb;
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
// not corrected for file bitcount, to be patched in open_datastream
if (tag == 0x03d && strstr(make,"NIKON") && len == 4)
{
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if(i>cblack[c]) i = cblack[c];
FORC4 cblack[c]-=i;
black += i;
}
#endif
if (tag == 0xe01) { /* Nikon Capture Note */
order = 0x4949;
fseek (ifp, 22, SEEK_CUR);
for (offset=22; offset+22 < len; offset += 22+i) {
tag = get4();
fseek (ifp, 14, SEEK_CUR);
i = get4()-4;
if (tag == 0x76a43207) flip = get2();
else fseek (ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7) {
fseek (ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7) {
if (len == 614)
fseek (ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek (ifp, 148, SEEK_CUR);
else goto next;
goto get2_256;
}
if ((tag == 0x1011 && len == 9) || tag == 0x20400200)
{
if(!strncasecmp(make,"Olympus", 7))
{
int j,k;
for (i=0; i < 3; i++)
FORC3 adobe_cam[i][c] = ((short) get2()) / 256.0;
for (i=0; i < 3; i++)
for (j=0; j < 3; j++)
for (cmatrix[i][j] = k=0; k < 3; k++)
cmatrix[i][j] += rgb_adobe[i][k] * adobe_cam[k][j];
}
else
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = ((short) get2()) / 256.0;
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2) {
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek (ifp, get4()+base, SEEK_SET);
if (tag == 0x2020)
parse_thumb_note (base, 257, 258);
if (tag == 0x2040)
parse_makernote (base, 0x2040);
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
if (tag == 0x2010)
{
parse_makernote(base, 0x2010);
}
// IB end
#endif
if (tag == 0xb028) {
fseek (ifp, get4()+base, SEEK_SET);
parse_thumb_note (base, 136, 137);
}
if (tag == 0x4001 && len > 500) {
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek (ifp, i, SEEK_CUR);
get2_rggb:
FORC4 cam_mul[c ^ (c >> 1)] = get2();
i = len >> 3 == 164 || len == 1506 ? 112:22;
fseek (ifp, i, SEEK_CUR);
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
}
if(!strcasecmp(make,"Samsung"))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i=0; i<11; i++) SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
if (tag == 0xa030 && len == 9) // get and decode Samsung color matrix
for (i=0; i < 3; i++)
FORC3 cmatrix[i][c] = (short)((get4() + SamsungKey[i*3+c]))/256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
next:
fseek (ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
   Since the TIFF DateTime string has no timezone information,
   assume that the camera's clock was set to Universal Time.

   Reads a 19-byte "YYYY:MM:DD HH:MM:SS" string from ifp and stores it
   into the global 'timestamp'.  If 'reversed' is non-zero, the bytes
   are stored back-to-front in the file.
 */
void CLASS get_timestamp (int reversed)
{
  struct tm t;
  char str[20];
  int i;
  time_t ts;
  str[19] = 0;
  if (reversed)		/* some cameras store the 19 bytes back-to-front */
    for (i=19; i--; ) str[i] = fgetc(ifp);
  else
    fread (str, 19, 1, ifp);
  memset (&t, 0, sizeof t);
  if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
	&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
    return;			/* malformed date string: leave timestamp alone */
  t.tm_year -= 1900;
  t.tm_mon -= 1;
  t.tm_isdst = -1;		/* let mktime() decide about DST */
  /* fix: call mktime() once instead of twice -- the second call was
     redundant work, the first already normalizes 't' */
  if ((ts = mktime(&t)) > 0)
    timestamp = ts;
}
/*
   Parse an EXIF IFD at 'base': exposure, aperture, ISO, timestamps,
   lens information (LibRaw build only) and the maker note (tag 37500).
   Each entry is read via tiff_get() and the file position is restored
   to 'save' afterwards, so handlers may seek freely.
 */
void CLASS parse_exif (int base)
{
  unsigned kodak, entries, tag, type, len, save, c;
  double expo,ape;
  kodak = !strncmp(make,"EASTMAN",7) && tiff_nifds < 3;
  entries = get2();
  /* defend against bogus Hasselblad sub-IFDs with absurd entry counts */
  if(!strcmp(make,"Hasselblad") && (tiff_nifds > 3) && (entries > 512)) return;
// printf("\n*** in parse_exif, make: =%s= model: =%s=", make, model);
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
// printf("\n\ttag: %x", tag);
#ifdef LIBRAW_LIBRARY_BUILD
    if(callbacks.exif_cb)
    {
      int savepos = ftell(ifp);
      callbacks.exif_cb(callbacks.exifparser_data,tag,type,len,order,ifp);
      fseek(ifp,savepos,SEEK_SET);
    }
#endif
    switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
    case 0xa405: // FocalLengthIn35mmFormat
      imgdata.lens.FocalLengthIn35mmFormat = get2();
      break;
    case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
      imgdata.lens.MinFocal = getreal(type);
      imgdata.lens.MaxFocal = getreal(type);
      imgdata.lens.MaxAp4MinFocal = getreal(type);
      imgdata.lens.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
      imgdata.lens.dng.MinFocal = getreal(type);
      imgdata.lens.dng.MaxFocal = getreal(type);
      imgdata.lens.dng.MaxAp4MinFocal = getreal(type);
      imgdata.lens.dng.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xa433: // LensMake
      fread(imgdata.lens.LensMake, MIN(len,sizeof(imgdata.lens.LensMake)), 1, ifp);
      break;
    case 0xa434: // LensModel
      /* bug fix: the read was bounded by sizeof(imgdata.lens.LensMake);
         bound it by the destination buffer instead, matching the same
         tag's handler in parse_tiff_ifd() */
      fread(imgdata.lens.Lens, MIN(len, sizeof(imgdata.lens.Lens)), 1, ifp);
      if (!strncmp(imgdata.lens.Lens, "----", 4))
        imgdata.lens.Lens[0] = 0;
      break;
    case 0x9205:
      imgdata.lens.EXIF_MaxAp = powf64(2.0f, (getreal(type) / 2.0f));
      break;
#endif
    case 33434: shutter = getreal(type); break;	/* ExposureTime */
    case 33437: aperture = getreal(type); break;	/* FNumber */
    case 34855: iso_speed = get2(); break;
    case 34866:
      /* ISOSpeed: used only when ISOSpeedRatings saturated at 0xffff */
      if (iso_speed == 0xffff && (!strcasecmp(make, "SONY") || !strcasecmp(make, "CANON")))
        iso_speed = getreal(type);
      break;
    case 36867:
    case 36868: get_timestamp(0); break;
    case 37377: if ((expo = -getreal(type)) < 128 && shutter == 0.)	/* APEX shutter */
                  shutter = powf64(2.0, expo); break;
    case 37378:							/* APEX aperture */
      if (fabs(ape = getreal(type))<256.0)
        aperture = powf64(2.0, ape/2);
      break;
    case 37385: flash_used = getreal(type); break;
    case 37386: focal_len = getreal(type); break;
    case 37500: parse_makernote (base, 0); break; // tag 0x927c
    case 40962: if (kodak) raw_width = get4(); break;
    case 40963: if (kodak) raw_height = get4(); break;
    case 41730:
      /* EXIF CFAPattern: accept only a 2x2 layout (0x20002 header) */
      if (get4() == 0x20002)
        for (exif_cfa=c=0; c < 8; c+=2)
          exif_cfa |= fgetc(ifp) * 0x01010101 << c;
    }
    fseek (ifp, save, SEEK_SET);
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
/*
   LibRaw extension: fill imgdata.other.parsed_gps from the EXIF GPS
   IFD at 'base'.  Marks gpsparsed as soon as any entry is present.
 */
void CLASS parse_gps_libraw(int base)
{
  unsigned n_entries, gtag, gtype, glen, gsave, idx;
  n_entries = get2();
  if (n_entries > 0)
    imgdata.other.parsed_gps.gpsparsed = 1;	/* at least one entry seen */
  while (n_entries--) {
    tiff_get(base, &gtag, &gtype, &glen, &gsave);
    if (gtag == 1)
      imgdata.other.parsed_gps.latref = getc(ifp);	/* 'N' / 'S' */
    else if (gtag == 3)
      imgdata.other.parsed_gps.longref = getc(ifp);	/* 'E' / 'W' */
    else if (gtag == 5)
      imgdata.other.parsed_gps.altref = getc(ifp);	/* above/below sea level */
    else if (gtag == 2 && glen == 3)			/* deg/min/sec triplet */
      for (idx = 0; idx < 3; idx++)
        imgdata.other.parsed_gps.latitude[idx] = getreal(gtype);
    else if (gtag == 4 && glen == 3)
      for (idx = 0; idx < 3; idx++)
        imgdata.other.parsed_gps.longtitude[idx] = getreal(gtype);
    else if (gtag == 7 && glen == 3)			/* h/m/s UTC triplet */
      for (idx = 0; idx < 3; idx++)
        imgdata.other.parsed_gps.gpstimestamp[idx] = getreal(gtype);
    else if (gtag == 6)
      imgdata.other.parsed_gps.altitude = getreal(gtype);
    else if (gtag == 9)
      imgdata.other.parsed_gps.gpsstatus = getc(ifp);
    fseek(ifp, gsave, SEEK_SET);
  }
}
#endif
/*
   Parse the standard EXIF GPS IFD at 'base' into the raw gpsdata[]
   word array (layout follows the classic dcraw convention).
 */
void CLASS parse_gps (int base)
{
  unsigned n, tag, type, len, save, k;
  n = get2();
  while (n--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == 1 || tag == 3 || tag == 5) {
      /* latitude/longitude/altitude reference bytes -> slots 29..31 */
      gpsdata[29+tag/2] = getc(ifp);
    } else if (tag == 2 || tag == 4 || tag == 7) {
      /* three rationals (num,den pairs) -> six words per tag */
      for (k = 0; k < 6; k++)
        gpsdata[tag/3*6+k] = get4();
    } else if (tag == 6) {
      /* altitude: one rational -> two words */
      for (k = 0; k < 2; k++)
        gpsdata[18+k] = get4();
    } else if (tag == 18 || tag == 29) {
      /* map datum / date strings, truncated to 11 chars + NUL */
      fgets ((char *) (gpsdata+14+tag/3), MIN(len,12), ifp);
    }
    fseek (ifp, save, SEEK_SET);
  }
}
/*
   Combine a ROMM(ProPhoto)-to-camera matrix with the fixed sRGB-to-ROMM
   matrix, storing the product in the global cmatrix.
 */
void CLASS romm_coeff (float romm_cam[3][3])
{
  static const float rgb_romm[3][3] =	/* ROMM == Kodak ProPhoto */
  { {  2.034193, -0.727420, -0.306766 },
    { -0.228811,  1.231729, -0.002922 },
    { -0.008565, -0.153273,  1.161839 } };
  int row, col, k;
  for (row = 0; row < 3; row++)
    for (col = 0; col < 3; col++) {
      float acc = 0;			/* dot product of row and column */
      for (k = 0; k < 3; k++)
        acc += rgb_romm[row][k] * romm_cam[k][col];
      cmatrix[row][col] = acc;
    }
#ifdef LIBRAW_LIBRARY_BUILD
  imgdata.color.digitalBack_color=1;
#endif
}
/*
   Parse a Leaf MOS metadata stream at 'offset': a sequence of "PKTS"
   records, each carrying a named 40-byte key plus a payload (thumbnail,
   ICC profile, color matrices, mosaic layout, neutral white balance...).
   Recurses into every payload, since records may nest.
 */
void CLASS parse_mos (int offset)
{
  char data[40];
  int skip, from, i, c, neut[4], planes=0, frot=0;
  static const char *mod[] =
  { "","DCB2","Volare","Cantare","CMost","Valeo 6","Valeo 11","Valeo 22",
    "Valeo 11p","Valeo 17","","Aptus 17","Aptus 22","Aptus 75","Aptus 65",
    "Aptus 54S","Aptus 65S","Aptus 75S","AFi 5","AFi 6","AFi 7",
    "Aptus-II 7","","","Aptus-II 6","","","Aptus-II 10","Aptus-II 5",
    "","","","","Aptus-II 10R","Aptus-II 8","","Aptus-II 12","","AFi-II 12" };
  float romm_cam[3][3];
  fseek (ifp, offset, SEEK_SET);
  while (1) {
    /* every record begins with the magic "PKTS" (0x504b5453) */
    if (get4() != 0x504b5453) break;
    get4();
    fread (data, 1, 40, ifp);		/* 40-byte record name */
    skip = get4();			/* payload length in bytes */
    from = ftell(ifp);			/* payload start position */
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
    if (!strcmp(data,"CameraObj_camera_type")) {
      fread(imgdata.lens.makernotes.body, skip, 1, ifp);
    }
#endif
// IB end
    if (!strcmp(data,"JPEG_preview_data")) {
      thumb_offset = from;
      thumb_length = skip;
    }
    if (!strcmp(data,"icc_camera_profile")) {
      profile_offset = from;
      profile_length = skip;
    }
    if (!strcmp(data,"ShootObj_back_type")) {
      fscanf (ifp, "%d", &i);
      /* map the numeric back type to a model name, if within table range */
      if ((unsigned) i < sizeof mod / sizeof (*mod))
	strcpy (model, mod[i]);
    }
    if (!strcmp(data,"icc_camera_to_tone_matrix")) {
      /* 9 IEEE floats stored as raw 32-bit words */
      for (i=0; i < 9; i++)
	romm_cam[0][i] = int_to_float(get4());
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_color_matrix")) {
      /* same matrix, but stored as ASCII floats */
      for (i=0; i < 9; i++)
	fscanf (ifp, "%f", &romm_cam[0][i]);
      romm_coeff (romm_cam);
    }
    if (!strcmp(data,"CaptProf_number_of_planes"))
      fscanf (ifp, "%d", &planes);
    if (!strcmp(data,"CaptProf_raw_data_rotation"))
      fscanf (ifp, "%d", &flip);
    if (!strcmp(data,"CaptProf_mosaic_pattern"))
      FORC4 {
	fscanf (ifp, "%d", &i);
	if (i == 1) frot = c ^ (c >> 1);	/* position of first green */
      }
    if (!strcmp(data,"ImgProf_rotation_angle")) {
      fscanf (ifp, "%d", &i);
      flip = i - flip;
    }
    if (!strcmp(data,"NeutObj_neutrals") && !cam_mul[0]) {
      /* neutral values: neut[0] is the reference, channels follow */
      FORC4 fscanf (ifp, "%d", neut+c);
      FORC3 cam_mul[c] = (float) neut[0] / neut[c+1];
    }
    if (!strcmp(data,"Rows_data"))
      load_flags = get4();
    parse_mos (from);			/* recurse into the payload */
    fseek (ifp, skip+from, SEEK_SET);	/* then skip past it */
  }
  /* single-plane backs are mosaiced; derive the CFA from rotation */
  if (planes)
    filters = (planes == 1) * 0x01010101 *
	(uchar) "\x94\x61\x16\x49"[(flip/90 + frot) & 3];
}
/*
   Load a camera linearization curve of 'len' 16-bit entries into
   curve[], replicate the last value through slot 0xffff, and derive
   the sensor 'maximum' from the curve.
 */
void CLASS linear_table (unsigned len)
{
  int i;
  if (!len) return;	/* fix: len==0 would execute curve[0] = curve[-1] */
  if (len > 0x10000) len = 0x10000;
  read_shorts (curve, len);
  /* extend the final value across the rest of the table */
  for (i=len; i < 0x10000; i++)
    curve[i] = curve[i-1];
  maximum = curve[len<0x1000?0xfff:len-1];
}
#ifdef LIBRAW_LIBRARY_BUILD
/*
   Kodak-specific IFD parser (LibRaw build).
   Thanks to Alexey Danilchenko for the wb as-shot parsing code.
   Reads white balance (selected by illuminant index 'wbi'), the
   linearization table, ISO, and the sensor dimensions.
 */
void CLASS parse_kodak_ifd (int base)
{
  unsigned entries, tag, type, len, save;
  int c, wbi=-2;	/* removed unused local 'i' */
  float mul[3]={1,1,1}, num;
  static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
  entries = get2();
  if (entries > 1024) return;	/* sanity limit on entry count */
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
    if(callbacks.exif_cb)
    {
      int savepos = ftell(ifp);
      callbacks.exif_cb(callbacks.exifparser_data,tag | 0x20000,type,len,order,ifp);
      fseek(ifp,savepos,SEEK_SET);
    }
#endif
    if (tag == 1020) wbi = getint(type);	/* illuminant index */
    if (tag == 1021 && len == 72) {		/* WB set in software */
      fseek (ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / get2();
      wbi = -2;
    }
    if (tag == 2120 + wbi ||
        (wbi<0 && tag == 2125))	/* use Auto WB if illuminant index is not set */
    {
      FORC3 mul[c] = (num=getreal(type))==0 ? 1 : num;
      FORC3 cam_mul[c] = mul[1] / mul[c];	/* normalise against green */
    }
    if (tag == 2317) linear_table (len);
    if (tag == 0x903) iso_speed = getreal(type);
    //if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) wbi = fgetc(ifp);
    if ((unsigned) wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 64019) width = getint(type);
    if (tag == 64020) height = (getint(type)+1) & -2;	/* round up to even */
    fseek (ifp, save, SEEK_SET);
  }
}
#else
/*
   Kodak-specific IFD parser (plain dcraw variant, without LibRaw
   callbacks).  Reads white balance -- either direct multipliers or a
   temperature polynomial -- plus the linearization table, ISO, and the
   sensor dimensions.
 */
void CLASS parse_kodak_ifd (int base)
{
  unsigned entries, tag, type, len, save;
  int i, c, wbi=-2, wbtemp=6500;
  float mul[3]={1,1,1}, num;
  static const int wbtag[] = { 64037,64040,64039,64041,-1,-1,64042 };
  entries = get2();
  if (entries > 1024) return;	/* sanity limit on entry count */
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
    if (tag == 1020) wbi = getint(type);	/* illuminant index */
    if (tag == 1021 && len == 72) {		/* WB set in software */
      fseek (ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / get2();
      wbi = -2;
    }
    if (tag == 2118) wbtemp = getint(type);	/* WB color temperature */
    if (tag == 2120 + wbi && wbi >= 0)		/* direct WB multipliers */
      FORC3 cam_mul[c] = 2048.0 / getreal(type);
    if (tag == 2130 + wbi)
      FORC3 mul[c] = getreal(type);
    if (tag == 2140 + wbi && wbi >= 0)
      /* per-channel cubic polynomial in (wbtemp/100) */
      FORC3 {
	for (num=i=0; i < 4; i++)
	  num += getreal(type) * pow (wbtemp/100.0, i);
	cam_mul[c] = 2048 / (num * mul[c]);
      }
    if (tag == 2317) linear_table (len);
    if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) wbi = fgetc(ifp);
    if ((unsigned) wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 64019) width = getint(type);
    if (tag == 64020) height = (getint(type)+1) & -2;	/* round up to even */
    fseek (ifp, save, SEEK_SET);
  }
}
#endif
//@end COMMON
void CLASS parse_minolta (int base);
int CLASS parse_tiff (int base);
//@out COMMON
/*
   Parse one TIFF/EXIF IFD at the current file position, filling the
   tiff_ifd[] slot allocated here and a large set of globals (camera
   multipliers, black levels, color matrices, thumbnail/profile
   offsets, load_raw selection, ...).  May recurse via SubIFDs, maker
   notes, and vendor sub-blocks.  Returns 1 to stop IFD-chain
   processing (table full or implausible entry count), 0 otherwise.
 */
int CLASS parse_tiff_ifd (int base)
{
  unsigned entries, tag, type, len, plen=16, save;
  int ifd, use_cm=0, cfa, i, j, c, ima_len=0;
  char *cbuf, *cp;
  uchar cfa_pat[16], cfa_pc[] = { 0,1,2,3 }, tab[256];
  double cc[4][4], cm[4][3], cam_xyz[4][3], num;
  double ab[]={ 1,1,1,1 }, asn[] = { 0,0,0,0 }, xyz[] = { 1,1,1 };
  unsigned sony_curve[] = { 0,0,0,0,0,4095 };
  unsigned *buf, sony_offset=0, sony_length=0, sony_key=0;
  struct jhead jh;
  int pana_raw = 0;
#ifndef LIBRAW_LIBRARY_BUILD
  FILE *sfp;
#endif
  /* refuse to overflow the tiff_ifd[] table */
  if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
    return 1;
  ifd = tiff_nifds++;
  /* camera calibration starts out as the identity matrix */
  for (j=0; j < 4; j++)
    for (i=0; i < 4; i++)
      cc[j][i] = i == j;
  entries = get2();
  if (entries > 512) return 1;	/* implausible entry count: bail out */
  while (entries--) {
    tiff_get (base, &tag, &type, &len, &save);
// printf ("\n*** parse_tiff_ifd tag: 0x%04x", tag);
#ifdef LIBRAW_LIBRARY_BUILD
    if(callbacks.exif_cb)
    {
      int savepos = ftell(ifp);
      callbacks.exif_cb(callbacks.exifparser_data,tag|(pana_raw?0x30000:0),type,len,order,ifp);
      fseek(ifp,savepos,SEEK_SET);
    }
#endif
    switch (tag) {
    case 1: if(len==4) pana_raw = get4(); break;
    case 5: width = get2(); break;
    case 6: height = get2(); break;
    case 7: width += get2(); break;
    case 9: if ((i = get2())) filters = i;
#ifdef LIBRAW_LIBRARY_BUILD
      if(pana_raw && len == 1 && type ==3)
        pana_black[3]+=i;
#endif
      break;
    case 8:
    case 10:
#ifdef LIBRAW_LIBRARY_BUILD
      if(pana_raw && len == 1 && type ==3)
        pana_black[3]+=get2();
#endif
      break;
    case 17: case 18:
      if (type == 3 && len == 1)
        cam_mul[(tag-17)*2] = get2() / 256.0;
      break;
    case 23:
      if (type == 3) iso_speed = get2();
      break;
    case 28: case 29: case 30:
#ifdef LIBRAW_LIBRARY_BUILD
      if(pana_raw && len == 1 && type ==3)
      {
        pana_black[tag-28] = get2();
      }
      else
#endif
      {
        cblack[tag-28] = get2();
        cblack[3] = cblack[1];
      }
      break;
    case 36: case 37: case 38:
      cam_mul[tag-36] = get2();
      break;
    case 39:
      if (len < 50 || cam_mul[0]) break;
      fseek (ifp, 12, SEEK_CUR);
      FORC3 cam_mul[c] = get2();
      break;
    case 46:
      /* embedded JPEG thumbnail: verify the SOI marker first */
      if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) break;
      thumb_offset = ftell(ifp) - 2;
      thumb_length = len;
      break;
    case 61440:			/* Fuji HS10 table */
      fseek (ifp, get4()+base, SEEK_SET);
      parse_tiff_ifd (base);
      break;
    case 2: case 256: case 61441:	/* ImageWidth */
      tiff_ifd[ifd].t_width = getint(type);
      break;
    case 3: case 257: case 61442:	/* ImageHeight */
      tiff_ifd[ifd].t_height = getint(type);
      break;
    case 258:				/* BitsPerSample */
    case 61443:
      tiff_ifd[ifd].samples = len & 7;
      tiff_ifd[ifd].bps = getint(type);
      break;
    case 61446:
      raw_height = 0;
      if (tiff_ifd[ifd].bps > 12) break;
      load_raw = &CLASS packed_load_raw;
      load_flags = get4() ? 24:80;
      break;
    case 259:				/* Compression */
      tiff_ifd[ifd].comp = getint(type);
      break;
    case 262:				/* PhotometricInterpretation */
      tiff_ifd[ifd].phint = get2();
      break;
    case 270:				/* ImageDescription */
      fread (desc, 512, 1, ifp);
      break;
    case 271:				/* Make */
      fgets (make, 64, ifp);
      break;
    case 272:				/* Model */
      fgets (model, 64, ifp);
      break;
    case 280:				/* Panasonic RW2 offset */
      if (type != 4) break;
      load_raw = &CLASS panasonic_load_raw;
      load_flags = 0x2008;
      /* intentional fall through: the RW2 offset is read like a StripOffset */
    case 273:				/* StripOffset */
    case 513:				/* JpegIFOffset */
    case 61447:
      tiff_ifd[ifd].offset = get4()+base;
      if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0) {
        /* no BitsPerSample yet: probe the data for a lossless-JPEG header */
        fseek (ifp, tiff_ifd[ifd].offset, SEEK_SET);
        if (ljpeg_start (&jh, 1)) {
          tiff_ifd[ifd].comp = 6;
          tiff_ifd[ifd].t_width = jh.wide;
          tiff_ifd[ifd].t_height = jh.high;
          tiff_ifd[ifd].bps = jh.bits;
          tiff_ifd[ifd].samples = jh.clrs;
          if (!(jh.sraw || (jh.clrs & 1)))
            tiff_ifd[ifd].t_width *= jh.clrs;
          i = order;
          parse_tiff (tiff_ifd[ifd].offset + 12);
          order = i;
        }
      }
      break;
    case 274:				/* Orientation */
      tiff_ifd[ifd].t_flip = "50132467"[get2() & 7]-'0';
      break;
    case 277:				/* SamplesPerPixel */
      tiff_ifd[ifd].samples = getint(type) & 7;
      break;
    case 279:				/* StripByteCounts */
    case 514:
    case 61448:
      tiff_ifd[ifd].bytes = get4();
      break;
    case 61454:
      FORC3 cam_mul[(4-c) % 3] = getint(type);
      break;
    case 305:  case 11:		/* Software */
      fgets (software, 64, ifp);
      /* files already processed by these tools are no longer raw */
      if (!strncmp(software,"Adobe",5) ||
	  !strncmp(software,"dcraw",5) ||
	  !strncmp(software,"UFRaw",5) ||
	  !strncmp(software,"Bibble",6) ||
	  !strcmp (software,"Digital Photo Professional"))
	is_raw = 0;
      break;
    case 306:				/* DateTime */
      get_timestamp(0);
      break;
    case 315:				/* Artist */
      fread (artist, 64, 1, ifp);
      break;
    case 322:				/* TileWidth */
      tiff_ifd[ifd].t_tile_width = getint(type);
      break;
    case 323:				/* TileLength */
      tiff_ifd[ifd].t_tile_length = getint(type);
      break;
    case 324:				/* TileOffsets */
      tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
      if (len == 1)
        tiff_ifd[ifd].t_tile_width = tiff_ifd[ifd].t_tile_length = 0;
      if (len == 4) {
        load_raw = &CLASS sinar_4shot_load_raw;
        is_raw = 5;
      }
      break;
#ifdef LIBRAW_LIBRARY_BUILD
    case 325:				/* TileByteCount */
      /* remember only the largest tile size */
      tiff_ifd[ifd].tile_maxbytes = 0;
      for(int jj=0;jj<len;jj++)
      {
        int s = get4();
        if(s > tiff_ifd[ifd].tile_maxbytes) tiff_ifd[ifd].tile_maxbytes=s;
      }
      break;
#endif
    case 330:				/* SubIFDs */
      if (!strcmp(model,"DSLR-A100") && tiff_ifd[ifd].t_width == 3872) {
        load_raw = &CLASS sony_arw_load_raw;
        data_offset = get4()+base;
        ifd++; break;
      }
#ifdef LIBRAW_LIBRARY_BUILD
      if (!strcmp(make,"Hasselblad") && libraw_internal_data.unpacker_data.hasselblad_parser_flag) {
        fseek (ifp, ftell(ifp)+4, SEEK_SET);
        fseek (ifp, get4()+base, SEEK_SET);
        parse_tiff_ifd (base);
        break;
      }
#endif
      if(len > 1000) len=1000; /* 1000 SubIFDs is enough */
      while (len--) {
        i = ftell(ifp);
        fseek (ifp, get4()+base, SEEK_SET);
        if (parse_tiff_ifd (base)) break;
        fseek (ifp, i+4, SEEK_SET);
      }
      break;
    case 400:
      strcpy (make, "Sarnoff");
      maximum = 0xfff;
      break;
#ifdef LIBRAW_LIBRARY_BUILD
    case 700:				/* XMP packet */
      if((type == 1 || type == 2 || type == 6 || type == 7) && len > 1 && len < 5100000)
      {
        xmpdata = (char*)malloc(xmplen = len+1);
        fread(xmpdata,len,1,ifp);
        xmpdata[len]=0;
      }
      break;
#endif
    case 28688:
      /* Sony tone-curve knee points; expand into curve[] */
      FORC4 sony_curve[c+1] = get2() >> 2 & 0xfff;
      for (i=0; i < 5; i++)
	for (j = sony_curve[i]+1; j <= sony_curve[i+1]; j++)
	  curve[j] = curve[j-1] + (1 << i);
      break;
    case 29184: sony_offset = get4();  break;
    case 29185: sony_length = get4();  break;
    case 29217: sony_key    = get4();  break;
    case 29264:
      parse_minolta (ftell(ifp));
      raw_width = 0;
      break;
    case 29443:
      FORC4 cam_mul[c ^ (c < 2)] = get2();
      break;
    case 29459:
      FORC4 cam_mul[c] = get2();
      i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
      SWAP (cam_mul[i],cam_mul[i+1])
      break;
    case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
      for (i=0; i < 3; i++)
        FORC3 cmatrix[i][c] = ((short) get2()) / 1024.0;
#ifdef DCRAW_VERBOSE
      if (verbose) fprintf (stderr, _(" Sony matrix:\n%f %f %f\n%f %f %f\n%f %f %f\n"), cmatrix[0][0], cmatrix[0][1], cmatrix[0][2], cmatrix[1][0], cmatrix[1][1], cmatrix[1][2], cmatrix[2][0], cmatrix[2][1], cmatrix[2][2]);
#endif
      break;
    case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, no more needs to be divided by 4
      FORC4 cblack[c ^ c >> 1] = get2();
      i = cblack[3];
      FORC3 if(i>cblack[c]) i = cblack[c];
      FORC4 cblack[c]-=i;	/* keep only per-channel deltas; common part goes to black */
      black = i;
#ifdef DCRAW_VERBOSE
      if (verbose) fprintf (stderr, _("...Sony black: %u cblack: %u %u %u %u\n"),black, cblack[0],cblack[1],cblack[2], cblack[3]);
#endif
      break;
    case 33405:			/* Model2 */
      fgets (model2, 64, ifp);
      break;
    case 33421:			/* CFARepeatPatternDim */
      if (get2() == 6 && get2() == 6)
        filters = 9;		/* 6x6 pattern == Fuji X-Trans */
      break;
    case 33422:			/* CFAPattern */
      if (filters == 9) {
        FORC(36) xtrans[0][c] = fgetc(ifp) & 3;
        break;
      }
      /* intentional fall through for Bayer-size patterns */
    case 64777:			/* Kodak P-series */
      if(len == 36)
      {
        filters = 9;
        colors = 3;
        FORC(36) xtrans[0][c] = fgetc(ifp) & 3;
      }
      else
      {
        if ((plen=len) > 16) plen = 16;
        fread (cfa_pat, 1, plen, ifp);
        /* count distinct colors in the pattern */
        for (colors=cfa=i=0; i < plen && colors < 4; i++) {
          colors += !(cfa & (1 << cfa_pat[i]));
          cfa |= 1 << cfa_pat[i];
        }
        if (cfa == 070) memcpy (cfa_pc,"\003\004\005",3);	/* CMY */
        if (cfa == 072) memcpy (cfa_pc,"\005\003\004\001",4);	/* GMCY */
        goto guess_cfa_pc;
      }
      break;
    case 33424:
    case 65024:
      fseek (ifp, get4()+base, SEEK_SET);
      parse_kodak_ifd (base);
      break;
    case 33434:			/* ExposureTime */
      shutter = getreal(type);
      break;
    case 33437:			/* FNumber */
      aperture = getreal(type);
      break;
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
    case 0xa405: // FocalLengthIn35mmFormat
      imgdata.lens.FocalLengthIn35mmFormat = get2();
      break;
    case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
      imgdata.lens.MinFocal = getreal(type);
      imgdata.lens.MaxFocal = getreal(type);
      imgdata.lens.MaxAp4MinFocal = getreal(type);
      imgdata.lens.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
      imgdata.lens.MinFocal = getreal(type);
      imgdata.lens.MaxFocal = getreal(type);
      imgdata.lens.MaxAp4MinFocal = getreal(type);
      imgdata.lens.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xa433: // LensMake
      fread(imgdata.lens.LensMake, MIN(len, sizeof(imgdata.lens.LensMake)), 1, ifp);
      break;
    case 0xa434: // LensModel
      fread(imgdata.lens.Lens, MIN(len, sizeof(imgdata.lens.Lens)), 1, ifp);
      if (!strncmp(imgdata.lens.Lens, "----", 4))
        imgdata.lens.Lens[0] = 0;
      break;
    case 0x9205:
      imgdata.lens.EXIF_MaxAp = powf64(2.0f, (getreal(type) / 2.0f));
      break;
// IB end
#endif
    case 34306:			/* Leaf white balance */
      FORC4 cam_mul[c ^ 1] = 4096.0 / get2();
      break;
    case 34307:			/* Leaf CatchLight color matrix */
      fread (software, 1, 7, ifp);
      if (strncmp(software,"MATRIX",6)) break;
      colors = 4;
      for (raw_color = i=0; i < 3; i++) {
        FORC4 fscanf (ifp, "%f", &rgb_cam[i][c^1]);
        if (!use_camera_wb) continue;
        /* normalize each row to unit sum */
        num = 0;
        FORC4 num += rgb_cam[i][c];
        FORC4 rgb_cam[i][c] /= num;
      }
      break;
    case 34310:			/* Leaf metadata */
      parse_mos (ftell(ifp));
      /* intentional fall through: Leaf metadata implies a Leaf back */
    case 34303:
      strcpy (make, "Leaf");
      break;
    case 34665:			/* EXIF tag */
      fseek (ifp, get4()+base, SEEK_SET);
      parse_exif (base);
      break;
    case 34853:			/* GPSInfo tag */
      {
        unsigned pos;
        fseek(ifp, pos = (get4() + base), SEEK_SET);
        parse_gps(base);
#ifdef LIBRAW_LIBRARY_BUILD
        fseek(ifp, pos, SEEK_SET);
        parse_gps_libraw(base);
#endif
      }
      break;
    case 34675:			/* InterColorProfile */
    case 50831:			/* AsShotICCProfile */
      profile_offset = ftell(ifp);
      profile_length = len;
      break;
    case 37122:			/* CompressedBitsPerPixel */
      kodak_cbpp = get4();
      break;
    case 37386:			/* FocalLength */
      focal_len = getreal(type);
      break;
    case 37393:			/* ImageNumber */
      shot_order = getint(type);
      break;
    case 37400:			/* old Kodak KDC tag */
      for (raw_color = i=0; i < 3; i++) {
        getreal(type);
        FORC3 rgb_cam[i][c] = getreal(type);
      }
      break;
    case 40976:
      strip_offset = get4();
      switch (tiff_ifd[ifd].comp) {
        case 32770: load_raw = &CLASS samsung_load_raw;  break;
        case 32772: load_raw = &CLASS samsung2_load_raw; break;
        case 32773: load_raw = &CLASS samsung3_load_raw; break;
      }
      break;
    case 46275:			/* Imacon tags */
      strcpy (make, "Imacon");
      data_offset = ftell(ifp);
      ima_len = len;
      break;
    case 46279:
      if (!ima_len) break;
      fseek (ifp, 38, SEEK_CUR);
      /* intentional fall through */
    case 46274:
      fseek (ifp, 40, SEEK_CUR);
      raw_width  = get4();
      raw_height = get4();
      left_margin = get4() & 7;
      width = raw_width - left_margin - (get4() & 7);
      top_margin = get4() & 7;
      height = raw_height - top_margin - (get4() & 7);
      /* known fixed-geometry Imacon backs */
      if (raw_width == 7262 && ima_len == 234317952 ) {
        height = 5412;
        width  = 7216;
        left_margin = 7;
        filters=0;
      } else if (raw_width == 7262) {
        height = 5444;
        width  = 7244;
        left_margin = 7;
      }
      fseek (ifp, 52, SEEK_CUR);
      FORC3 cam_mul[c] = getreal(11);
      fseek (ifp, 114, SEEK_CUR);
      flip = (get2() >> 7) * 90;
      if (width * height * 6 == ima_len) {
        /* full-color (non-mosaic) capture */
        if (flip % 180 == 90) SWAP(width,height);
        raw_width = width;
        raw_height = height;
        left_margin = top_margin = filters = flip = 0;
      }
      sprintf (model, "Ixpress %d-Mp", height*width/1000000);
      load_raw = &CLASS imacon_full_load_raw;
      if (filters) {
        if (left_margin & 1) filters = 0x61616161;
        load_raw = &CLASS unpacked_load_raw;
      }
      maximum = 0xffff;
      break;
    case 50454:			/* Sinar tag */
    case 50455:
      if (!(cbuf = (char *) malloc(len))) break;
      fread (cbuf, 1, len, ifp);
      /* scan the text blob line by line for a "Neutral " entry */
      for (cp = cbuf-1; cp && cp < cbuf+len; cp = strchr(cp,'\n'))
        if (!strncmp (++cp,"Neutral ",8))
          sscanf (cp+8, "%f %f %f", cam_mul, cam_mul+1, cam_mul+2);
      free (cbuf);
      break;
    case 50458:
      if (!make[0]) strcpy (make, "Hasselblad");
      break;
    case 50459:			/* Hasselblad tag */
#ifdef LIBRAW_LIBRARY_BUILD
      libraw_internal_data.unpacker_data.hasselblad_parser_flag=1;
#endif
      /* parse the embedded TIFF with its own byte order, then restore state */
      i = order;
      j = ftell(ifp);
      c = tiff_nifds;
      order = get2();
      fseek (ifp, j+(get2(),get4()), SEEK_SET);
      parse_tiff_ifd (j);
      maximum = 0xffff;
      tiff_nifds = c;
      order = i;
      break;
    case 50706:			/* DNGVersion */
      FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
      if (!make[0]) strcpy (make, "DNG");
      is_raw = 1;
      break;
    case 50710:			/* CFAPlaneColor */
      if (filters == 9) break;
      if (len > 4) len = 4;
      colors = len;
      fread (cfa_pc, 1, colors, ifp);
      /* derive the filters bit pattern from the plane colors */
guess_cfa_pc:
      FORCC tab[cfa_pc[c]] = c;
      cdesc[c] = 0;
      for (i=16; i--; )
        filters = filters << 2 | tab[cfa_pat[i % plen]];
      filters -= !filters;
      break;
    case 50711:			/* CFALayout */
      if (get2() == 2) {
        fuji_width = 1;
        filters = 0x49494949;
      }
      break;
    case 291:
    case 50712:			/* LinearizationTable */
      linear_table (len);
      break;
    case 50713:			/* BlackLevelRepeatDim */
      cblack[4] = get2();
      cblack[5] = get2();
      /* reject repeat dims that would overflow the cblack[] array */
      if (cblack[4] * cblack[5] > (sizeof(cblack) / sizeof (cblack[0]) - 6))
        cblack[4] = cblack[5] = 1;
      break;
    case 61450:
      cblack[4] = cblack[5] = MIN(sqrt((double)len),64);
      /* intentional fall through */
    case 50714:			/* BlackLevel */
      if((cblack[4] * cblack[5] < 2) && len == 1)
      {
        black = getreal(type);
      }
      else if(cblack[4] * cblack[5] <= len)
      {
        FORC (cblack[4] * cblack[5])
          cblack[6+c] = getreal(type);
        black = 0;
      }
      break;
    case 50715:			/* BlackLevelDeltaH */
    case 50716:			/* BlackLevelDeltaV */
      /* add the average delta to the overall black level */
      for (num=i=0; i < len && i < 65536; i++)
        num += getreal(type);
      black += num/len + 0.5;
      break;
    case 50717:			/* WhiteLevel */
      maximum = getint(type);
      break;
    case 50718:			/* DefaultScale */
      pixel_aspect  = getreal(type);
      pixel_aspect /= getreal(type);
      if(pixel_aspect > 0.995 && pixel_aspect < 1.005)
        pixel_aspect = 1.0;
      break;
#ifdef LIBRAW_LIBRARY_BUILD
    case 50778:
      imgdata.color.dng_color[0].illuminant = get2();
      break;
    case 50779:
      imgdata.color.dng_color[1].illuminant = get2();
      break;
#endif
    case 50721:			/* ColorMatrix1 */
    case 50722:			/* ColorMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
      i = tag == 50721?0:1;
#endif
      FORCC for (j=0; j < 3; j++)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        imgdata.color.dng_color[i].colormatrix[c][j]=
#endif
        cm[c][j] = getreal(type);
      }
      use_cm = 1;
      break;
    case 50723:			/* CameraCalibration1 */
    case 50724:			/* CameraCalibration2 */
#ifdef LIBRAW_LIBRARY_BUILD
      j = tag == 50723?0:1;
#endif
      for (i=0; i < colors; i++)
        FORCC
        {
#ifdef LIBRAW_LIBRARY_BUILD
          imgdata.color.dng_color[j].calibration[i][c]=
#endif
          cc[i][c] = getreal(type);
        }
      break;
    case 50727:			/* AnalogBalance */
      FORCC ab[c] = getreal(type);
      break;
    case 50728:			/* AsShotNeutral */
      FORCC asn[c] = getreal(type);
      break;
    case 50729:			/* AsShotWhiteXY */
      xyz[0] = getreal(type);
      xyz[1] = getreal(type);
      xyz[2] = 1 - xyz[0] - xyz[1];
      FORC3 xyz[c] /= d65_white[c];
      break;
#ifdef LIBRAW_LIBRARY_BUILD
    case 50730:			/* DNG: Baseline Exposure */
      baseline_exposure = getreal(type);
      break;
#endif
// IB start
    case 50740:		/* tag 0xc634 : DNG Adobe, DNG Pentax, Sony SR2, DNG Private */
      {
        char mbuf[64];
        unsigned short makernote_found = 0;
        unsigned curr_pos, start_pos = ftell(ifp);
        unsigned MakN_order, m_sorder = order;
        unsigned MakN_length;
        unsigned pos_in_original_raw;
        fread(mbuf, 1, 6, ifp);
        if (!strcmp(mbuf, "Adobe")) {
          order = 0x4d4d; // Adobe header is always in "MM" / big endian
          curr_pos = start_pos + 6;
          /* walk the Adobe DNGPrivateData records looking for "MakN" */
          while (curr_pos + 8 - start_pos <= len)
          {
            fread(mbuf, 1, 4, ifp);
            curr_pos += 8;
            if (!strncmp(mbuf, "MakN", 4)) {
              makernote_found = 1;
              MakN_length = get4();
              MakN_order = get2();
              pos_in_original_raw = get4();
              order = MakN_order;
              parse_makernote_0xc634(curr_pos + 6 - pos_in_original_raw, 0, AdobeDNG);
              break;
            }
          }
        }
        else {
          fread(mbuf + 6, 1, 2, ifp);
          if (!strcmp(mbuf, "PENTAX ") ||
              !strcmp(mbuf, "SAMSUNG"))
          {
            makernote_found = 1;
            fseek(ifp, start_pos, SEEK_SET);
            parse_makernote_0xc634(base, 0, CameraDNG);
          }
        }
        if (!makernote_found) fseek(ifp, start_pos, SEEK_SET);
        order = m_sorder;
      }
// IB end
      if (dng_version) break;
      parse_minolta (j = get4()+base);
      fseek (ifp, j, SEEK_SET);
      parse_tiff_ifd (base);
      break;
    case 50752:
      read_shorts (cr2_slice, 3);
      break;
    case 50829:			/* ActiveArea */
      top_margin = getint(type);
      left_margin = getint(type);
      height = getint(type) - top_margin;
      width = getint(type) - left_margin;
      break;
    case 50830:			/* MaskedAreas */
      for (i=0; i < len && i < 32; i++)
        mask[0][i] = getint(type);
      black = 0;
      break;
    case 51009:			/* OpcodeList2 */
      meta_offset = ftell(ifp);
      break;
    case 64772:			/* Kodak P-series */
      if (len < 13) break;
      fseek (ifp, 16, SEEK_CUR);
      data_offset = get4();
      fseek (ifp, 28, SEEK_CUR);
      data_offset += get4();
      load_raw = &CLASS packed_load_raw;
      break;
    case 65026:
      if (type == 2) fgets (model2, 64, ifp);
    }
    fseek (ifp, save, SEEK_SET);
  }
  /* decrypt and parse the encrypted Sony sub-IFD, if one was announced */
  if (sony_length && (buf = (unsigned *) malloc(sony_length))) {
    fseek (ifp, sony_offset, SEEK_SET);
    fread (buf, sony_length, 1, ifp);
    sony_decrypt (buf, sony_length/4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
    sfp = ifp;
    if ((ifp = tmpfile())) {
      fwrite (buf, sony_length, 1, ifp);
      fseek (ifp, 0, SEEK_SET);
      parse_tiff_ifd (-sony_offset);
      fclose (ifp);
    }
    ifp = sfp;
#else
    if( !ifp->tempbuffer_open(buf,sony_length))
    {
      parse_tiff_ifd(-sony_offset);
      ifp->tempbuffer_close();
    }
#endif
    free (buf);
  }
  /* apply analog balance to the camera calibration matrix */
  for (i=0; i < colors; i++)
    FORCC cc[i][c] *= ab[i];
  if (use_cm) {
    /* combine calibration, color matrix and white point into cam_xyz */
    FORCC for (i=0; i < 3; i++)
      for (cam_xyz[c][i]=j=0; j < colors; j++)
	cam_xyz[c][i] += cc[c][j] * cm[j][i] * xyz[i];
    cam_xyz_coeff (cmatrix, cam_xyz);
  }
  if (asn[0]) {
    /* AsShotNeutral gives the white balance directly */
    cam_mul[3] = 0;
    FORCC cam_mul[c] = 1 / asn[c];
  }
  if (!use_cm)
    FORCC pre_mul[c] /= cc[c][c];
  return 0;
}
/*
   Validate a TIFF header at `base` and walk its chain of IFDs.
   Returns 0 if the byte-order mark is not a valid TIFF signature,
   1 otherwise (even if no IFD produced anything useful).
 */
int CLASS parse_tiff (int base)
{
  int ifd_offset;

  fseek (ifp, base, SEEK_SET);
  order = get2();
  /* Only "II" (little-endian) and "MM" (big-endian) are legal. */
  if (order != 0x4949 && order != 0x4d4d)
    return 0;
  get2();			/* skip the TIFF magic word */
  /* Each IFD ends with the offset of the next one; zero terminates. */
  for (ifd_offset = get4(); ifd_offset; ifd_offset = get4()) {
    fseek (ifp, ifd_offset + base, SEEK_SET);
    if (parse_tiff_ifd (base))
      break;
  }
  return 1;
}
/*
   After parse_tiff() has filled tiff_ifd[], pick the main raw IFD and
   the best thumbnail IFD, and select the matching load_raw / thumbnail
   writers based on compression type, bit depth and maker quirks.
 */
void CLASS apply_tiff()
{
  int max_samp=0, raw=-1, thm=-1, i;
  struct jhead jh;
  thumb_misc = 16;
  if (thumb_offset) {
    fseek (ifp, thumb_offset, SEEK_SET);
    if (ljpeg_start (&jh, 1)) {
      /* Trust the embedded JPEG header only if its fields are sane. */
      if((unsigned)jh.bits<17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
      {
        thumb_misc   = jh.bits;
        thumb_width  = jh.wide;
        thumb_height = jh.high;
      }
    }
  }
  for (i=0; i < tiff_nifds; i++) {
    if (max_samp < tiff_ifd[i].samples)
      max_samp = tiff_ifd[i].samples;
    if (max_samp > 3) max_samp = 3;
    /* The raw frame is the largest plausible image that is not a
       3-sample JPEG (comp 6), with sane dimensions/bps/samples. */
    if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
        unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
        (unsigned)tiff_ifd[i].bps < 33 && (unsigned)tiff_ifd[i].samples < 13 &&
        tiff_ifd[i].t_width*tiff_ifd[i].t_height > raw_width*raw_height) {
      raw_width     = tiff_ifd[i].t_width;
      raw_height    = tiff_ifd[i].t_height;
      tiff_bps      = tiff_ifd[i].bps;
      tiff_compress = tiff_ifd[i].comp;
      data_offset   = tiff_ifd[i].offset;
      tiff_flip     = tiff_ifd[i].t_flip;
      tiff_samples  = tiff_ifd[i].samples;
      tile_width    = tiff_ifd[i].t_tile_width;
      tile_length   = tiff_ifd[i].t_tile_length;
#ifdef LIBRAW_LIBRARY_BUILD
      data_size = tile_length < INT_MAX && tile_length>0 ? tiff_ifd[i].tile_maxbytes: tiff_ifd[i].bytes;
#endif
      raw = i;
    }
  }
  if (!tile_width ) tile_width  = INT_MAX;	/* "untiled" sentinel */
  if (!tile_length) tile_length = INT_MAX;
  /* Last IFD with an orientation wins. */
  for (i=tiff_nifds; i--; )
    if (tiff_ifd[i].t_flip) tiff_flip = tiff_ifd[i].t_flip;
  if (raw >= 0 && !load_raw)
    switch (tiff_compress) {
      case 32767:
        /* Sony ARW: distinguish variants by byte count, then fall
           through the load_flags adjustments below. */
        if (tiff_ifd[raw].bytes == raw_width*raw_height) {
          tiff_bps = 12;
          load_raw = &CLASS sony_arw2_load_raw;			break;
        }
        if (tiff_ifd[raw].bytes*8 != raw_width*raw_height*tiff_bps) {
          raw_height += 8;
          load_raw = &CLASS sony_arw_load_raw;			break;
        }
        load_flags = 79;
        /* deliberate fallthrough */
      case 32769:
        load_flags++;
        /* deliberate fallthrough */
      case 32770:
      case 32773: goto slr;
      case 0:  case 1:
#ifdef LIBRAW_LIBRARY_BUILD
        if(!strcasecmp(make,"Nikon") && !strncmp(software,"Nikon Scan",10))
        {
          load_raw = &CLASS nikon_coolscan_load_raw;
          raw_color = 1;
          filters = 0;
          break;
        }
#endif
        if (!strncmp(make,"OLYMPUS",7) &&
            tiff_ifd[raw].bytes*2 == raw_width*raw_height*3)
          load_flags = 24;
        if (tiff_ifd[raw].bytes*5 == raw_width*raw_height*8) {
          load_flags = 81;
          tiff_bps = 12;
        } slr:		/* shared dispatch on bit depth */
        switch (tiff_bps) {
          case  8: load_raw = &CLASS eight_bit_load_raw;	break;
          case 12: if (tiff_ifd[raw].phint == 2)
                     load_flags = 6;
                   load_raw = &CLASS packed_load_raw;		break;
          case 14: load_flags = 0;
            /* deliberate fallthrough: 14-bit also uses unpacked */
          case 16: load_raw = &CLASS unpacked_load_raw;
                   if (!strncmp(make,"OLYMPUS",7) &&
                       tiff_ifd[raw].bytes*7 > raw_width*raw_height)
                     load_raw = &CLASS olympus_load_raw;
        }
        break;
      case 6:  case 7:  case 99:
        load_raw = &CLASS lossless_jpeg_load_raw;		break;
      case 262:
        load_raw = &CLASS kodak_262_load_raw;			break;
      case 34713:
        /* Nikon NEF: pick a variant by comparing the IFD byte count
           against the expected packed/unpacked sizes. */
        if ((raw_width+9)/10*16*raw_height == tiff_ifd[raw].bytes) {
          load_raw = &CLASS packed_load_raw;
          load_flags = 1;
        } else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes*2) {
          load_raw = &CLASS packed_load_raw;
          if (model[0] == 'N') load_flags = 80;
        } else if (raw_width*raw_height*3 == tiff_ifd[raw].bytes) {
          load_raw = &CLASS nikon_yuv_load_raw;
          gamma_curve (1/2.4, 12.92, 1, 4095);	/* sRGB-style curve */
          memset (cblack, 0, sizeof cblack);
          filters = 0;
        } else if (raw_width*raw_height*2 == tiff_ifd[raw].bytes) {
          load_raw = &CLASS unpacked_load_raw;
          load_flags = 4;
          order = 0x4d4d;
        } else
#ifdef LIBRAW_LIBRARY_BUILD
          if(raw_width*raw_height*3 == tiff_ifd[raw].bytes*2)
          {
            load_raw = &CLASS packed_load_raw;
            load_flags=80;
          }
          else
#endif
          load_raw = &CLASS nikon_load_raw;			break;
      case 65535:
        load_raw = &CLASS pentax_load_raw;			break;
      case 65000:
        /* Kodak: decoder depends on photometric interpretation. */
        switch (tiff_ifd[raw].phint) {
          case 2: load_raw = &CLASS kodak_rgb_load_raw;   filters = 0;  break;
          case 6: load_raw = &CLASS kodak_ycbcr_load_raw; filters = 0;  break;
          case 32803: load_raw = &CLASS kodak_65000_load_raw;
        }
      case 32867: case 34892: break;
      default: is_raw = 0;
    }
  /* Demote formats that look like plain processed images. */
  if (!dng_version)
    if ( ((tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 &&
          (tiff_compress & -16) != 32768)
        || (tiff_bps == 8 && !strcasestr(make,"Kodak") &&
            !strstr(model2,"DEBUG RAW")))
        && strncmp(software,"Nikon Scan",10))
      is_raw = 0;
  /* Pick the best thumbnail: the densest non-raw IFD. */
  for (i=0; i < tiff_nifds; i++)
    if (i != raw && tiff_ifd[i].samples == max_samp &&
        tiff_ifd[i].bps>0 && tiff_ifd[i].bps < 33 &&
        unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
        tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps)+1) >
        thumb_width *       thumb_height / (SQR(thumb_misc)+1)
        && tiff_ifd[i].comp != 34892) {
      thumb_width  = tiff_ifd[i].t_width;
      thumb_height = tiff_ifd[i].t_height;
      thumb_offset = tiff_ifd[i].offset;
      thumb_length = tiff_ifd[i].bytes;
      thumb_misc   = tiff_ifd[i].bps;
      thm = i;
    }
  if (thm >= 0) {
    thumb_misc |= tiff_ifd[thm].samples << 5;	/* pack samples into misc */
    switch (tiff_ifd[thm].comp) {
      case 0:
        write_thumb = &CLASS layer_thumb;
        break;
      case 1:
        if (tiff_ifd[thm].bps <= 8)
          write_thumb = &CLASS ppm_thumb;
        else if (!strcmp(make,"Imacon"))
          write_thumb = &CLASS ppm16_thumb;
        else
          thumb_load_raw = &CLASS kodak_thumb_load_raw;
        break;
      case 65000:
        thumb_load_raw = tiff_ifd[thm].phint == 6 ?
            &CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
    }
  }
}
/*
   Parse a Minolta MRW header: a sequence of tagged blocks (PRD, WBG,
   TTW) preceding the raw data.  Recovers dimensions, white balance
   and the embedded TIFF metadata block.
 */
void CLASS parse_minolta (int base)
{
  int save, tag, len, offset, high=0, wide=0, i, c;
  short sorder=order;
  fseek (ifp, base, SEEK_SET);
  /* File must start with "\0MR"; fourth byte selects endianness. */
  if (fgetc(ifp) || fgetc(ifp)-'M' || fgetc(ifp)-'R') return;
  /* 'M' (0x4d) or 'I' (0x49) times 0x101 gives 0x4d4d / 0x4949. */
  order = fgetc(ifp) * 0x101;
  offset = base + get4() + 8;
  while ((save=ftell(ifp)) < offset) {
    /* Each block: 4 tag bytes, 4-byte length, then the payload. */
    for (tag=i=0; i < 4; i++)
      tag = tag << 8 | fgetc(ifp);
    len = get4();
    switch (tag) {
      case 0x505244:				/* PRD */
        fseek (ifp, 8, SEEK_CUR);
        high = get2();
        wide = get2();
        break;
      case 0x574247:				/* WBG */
        get4();
        i = strcmp(model,"DiMAGE A200") ? 0:3;
        /* c^(c>>1) reorders the stored channels into cam_mul slots. */
        FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
        break;
      case 0x545457:				/* TTW */
        /* Embedded TIFF carries the regular metadata tags. */
        parse_tiff (ftell(ifp));
        data_offset = offset;
    }
    fseek (ifp, save+len+8, SEEK_SET);
  }
  raw_height = high;
  raw_width  = wide;
  order = sorder;		/* restore the caller's byte order */
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
   at the same time.  The raw file has no header, so try to
   open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
  /* Derive the sibling JPEG's filename from ifname (swap the numeric
     halves of an 8.3 basename, or bump the trailing digit of a .jpg),
     then parse its TIFF/EXIF metadata in place of the headerless raw. */
  const char *file, *ext;
  char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
  FILE *save=ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__)  && defined(_MSC_VER) && (_MSC_VER  > 1310)
  if(ifp->wfname())
  {
    /* Wide-char path variant: replace the 3-char extension with "JPG". */
    std::wstring rawfile(ifp->wfname());
    rawfile.replace(rawfile.length()-3,3,L"JPG");
    if(!ifp->subfile_open(rawfile.c_str()))
    {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    }
    else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
    return;
  }
#endif
  if(!ifp->fname())
  {
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
    return;
  }
#endif
  ext  = strrchr (ifname, '.');
  file = strrchr (ifname, '/');
  if (!file) file = strrchr (ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
  if (!file) file = ifname-1;
#else
  if (!file) file = (char*)ifname-1;
#endif
  file++;
  /* Only attempt this for "XXXXNNNN.EXT"-style 8+4 basenames. */
  if (!ext || strlen(ext) != 4 || ext-file != 8) return;
  jname = (char *) malloc (strlen(ifname) + 1);
  merror (jname, "parse_external_jpeg()");
  strcpy (jname, ifname);
  /* Re-base the file/ext pointers into the writable copy. */
  jfile = file - ifname + jname;
  jext  = ext  - ifname + jname;
  if (strcasecmp (ext, ".jpg")) {
    strcpy (jext, isupper(ext[1]) ? ".JPG":".jpg");
    if (isdigit(*file)) {
      /* Swap the two 4-char halves of the basename. */
      memcpy (jfile, file+4, 4);
      memcpy (jfile+4, file, 4);
    }
  } else
    /* Already a .jpg: increment the trailing number (with carry). */
    while (isdigit(*--jext)) {
      if (*jext != '9') {
        (*jext)++;
        break;
      }
      *jext = '0';
    }
#ifndef LIBRAW_LIBRARY_BUILD
  if (strcmp (jname, ifname)) {
    if ((ifp = fopen (jname, "rb"))) {
#ifdef DCRAW_VERBOSE
      if (verbose)
        fprintf (stderr,_("Reading metadata from %s ...\n"), jname);
#endif
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      fclose (ifp);
    }
  }
#else
  if (strcmp (jname, ifname))
  {
    if(!ifp->subfile_open(jname))
    {
      parse_tiff (12);
      thumb_offset = 0;
      is_raw = 1;
      ifp->subfile_close();
    }
    else
      imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
  }
#endif
  if (!timestamp)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA ;
#endif
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("Failed to read metadata from %s\n"), jname);
#endif
  }
  free (jname);
#ifndef LIBRAW_LIBRARY_BUILD
  ifp = save;			/* restore the original stream */
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
  /* Decode the 8x8 white-sample block into white[][]: a bitstream of
     bpp-bit values, XOR-obfuscated word-by-word with a 2-entry key. */
  static const ushort key[] = { 0x410, 0x45f3 };
  int i, bpp, row, col, vbits=0;
  unsigned long bitbuf=0;
  /* Header must announce an 8x8 grid (0x80008) with a nonzero field. */
  if ((get2(),get4()) != 0x80008 || !get4()) return;
  bpp = get2();
  if (bpp != 10 && bpp != 12) return;
  for (i=row=0; row < 8; row++)
    for (col=0; col < 8; col++) {
      if (vbits < bpp) {
        /* Refill the bit buffer one de-obfuscated 16-bit word at a time. */
        bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
        vbits += 16;
      }
      /* Shift the top bpp valid bits down to the bottom. */
      white[row][col] =
        bitbuf << (LONG_BIT - vbits) >> (LONG_BIT - bpp);
      vbits -= bpp;
    }
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff (int offset, int length, int depth)
{
  /* Walk a CIFF record table (the directory lives at the end of the
     block; its offset is in the block's last 4 bytes), extracting
     camera identity, exposure, white balance and lens data. */
  int tboff, nrecs, c, type, len, save, wbi=-1;
  ushort key[] = { 0x410, 0x45f3 };
  fseek (ifp, offset+length-4, SEEK_SET);
  tboff = get4() + offset;
  fseek (ifp, tboff, SEEK_SET);
  nrecs = get2();
  if ((nrecs | depth) > 127) return;	/* cap table size and recursion depth */
  while (nrecs--) {
    type = get2();
    len  = get4();
    //	  printf ("\n*** type: 0x%04x  len: 0x%04x", type, len);
    save = ftell(ifp) + 4;
    fseek (ifp, offset+get4(), SEEK_SET);
    /* Record types 0x28xx/0x30xx are sub-tables. */
    if ((((type >> 8) + 8) | 8) == 0x38) {
      parse_ciff (ftell(ifp), len, depth+1); /* Parse a sub-table */
    }
    if (type == 0x0810)
      fread (artist, 64, 1, ifp);
    if (type == 0x080a) {
      fread (make, 64, 1, ifp);
      /* The model string follows make's NUL inside the same field. */
      fseek (ifp, strlen(make) - 63, SEEK_CUR);
      fread (model, 64, 1, ifp);
    }
    if (type == 0x1810) {
      width  = get4();
      height = get4();
      pixel_aspect = int_to_float(get4());
      flip = get4();
    }
    if (type == 0x1835)		/* Get the decoder table */
      tiff_compress = get4();
    if (type == 0x2007) {
      thumb_offset = ftell(ifp);
      thumb_length = len;
    }
    if (type == 0x1818) {
      /* APEX values: comma-reads skip a leading word. */
      shutter  = powf64(2.0f, -int_to_float((get4(),get4())));
      aperture = powf64(2.0f,  int_to_float(get4())/2);
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.lens.makernotes.CurAp = aperture;
#endif
    }
    if (type == 0x102a) {
      //      iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
      iso_speed = powf64(2.0f, ((get2(),get2()) + get2())/32.0f - 5.0f) * 100.0f;
#ifdef LIBRAW_LIBRARY_BUILD
      aperture = _CanonConvertAperture((get2(),get2()));
      imgdata.lens.makernotes.CurAp = aperture;
#else
      aperture = powf64(2.0, (get2(),(short)get2())/64.0);
#endif
      shutter = powf64(2.0,-((short)get2())/32.0);
      wbi = (get2(),get2());	/* white-balance index */
      if (wbi > 17) wbi = 0;
      fseek (ifp, 32, SEEK_CUR);
      if (shutter > 1e6) shutter = get2()/10.0;	/* implausible: use fallback */
    }
    if (type == 0x102c) {
      if (get2() > 512) {		/* Pro90, G1 */
        fseek (ifp, 118, SEEK_CUR);
        FORC4 cam_mul[c ^ 2] = get2();
      } else {				/* G2, S30, S40 */
        fseek (ifp, 98, SEEK_CUR);
        FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
    if (type == 0x102d) {
      /* Canon camera-settings record: lens identity and focal range. */
      fseek(ifp, 44, SEEK_CUR);
      imgdata.lens.makernotes.LensID = get2();
      imgdata.lens.makernotes.MaxFocal = get2();
      imgdata.lens.makernotes.MinFocal = get2();
      imgdata.lens.makernotes.CanonFocalUnits = get2();
      if (imgdata.lens.makernotes.CanonFocalUnits != 1)
      {
        imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
        imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
      }
      imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
      imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
    }
#endif
    if (type == 0x0032) {
      if (len == 768) {			/* EOS D30 */
        fseek (ifp, 72, SEEK_CUR);
        FORC4 cam_mul[c ^ (c >> 1)] = 1024.0 / get2();
        if (!wbi) cam_mul[0] = -1;	/* use my auto white balance */
      } else if (!cam_mul[0]) {
        /* Per-model table selects which WB slot to read. */
        if (get2() == key[0])		/* Pro1, G6, S60, S70 */
          c = (strstr(model,"Pro1") ?
              "012346000000000000":"01345:000000006008")[wbi]-'0'+ 2;
        else {				/* G3, G5, S45, S50 */
          c = "023457000000006000"[wbi]-'0';
          key[0] = key[1] = 0;		/* these models store WB unobfuscated */
        }
        fseek (ifp, 78 + c*8, SEEK_CUR);
        FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
        if (!wbi) cam_mul[0] = -1;
      }
    }
    if (type == 0x10a9) {		/* D60, 10D, 300D, and clones */
      if (len > 66) wbi = "0134567028"[wbi]-'0';
      fseek (ifp, 2 + wbi*8, SEEK_CUR);
      FORC4 cam_mul[c ^ (c >> 1)] = get2();
    }
    /* NOTE(review): if no 0x102a record preceded this, wbi is still -1
       here and the shift below is ill-defined — verify inputs. */
    if (type == 0x1030 && (0x18040 >> wbi & 1))
      ciff_block_1030();		/* all that don't have 0x10a9 */
    if (type == 0x1031) {
      raw_width = (get2(),get2());
      raw_height = get2();
    }
    if (type == 0x501c) {
      iso_speed = len & 0xffff;
    }
    if (type == 0x5029) {
#ifdef LIBRAW_LIBRARY_BUILD
      /* Focal length packed into the record length field. */
      imgdata.lens.makernotes.CurFocal = len >> 16;
      imgdata.lens.makernotes.FocalType = len & 0xffff;
      if (imgdata.lens.makernotes.FocalType == 2) {
        imgdata.lens.makernotes.CanonFocalUnits = 32;
        imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
      }
      focal_len = imgdata.lens.makernotes.CurFocal;
#else
      focal_len = len >> 16;
      if ((len & 0xffff) == 2) focal_len /= 32;
#endif
    }
    if (type == 0x5813) flash_used = int_to_float(len);
    if (type == 0x5814) canon_ev   = int_to_float(len);
    if (type == 0x5817) shot_order = len;
    if (type == 0x5834)
    {
      unique_id  = len;
#ifdef LIBRAW_LIBRARY_BUILD
      setCanonBodyFeatures(unique_id);
#endif
    }
    if (type == 0x580e) timestamp  = len;
    if (type == 0x180e) timestamp  = get4();
#ifdef LOCALTIME
    if ((type | 0x4000) == 0x580e)
      timestamp = mktime (gmtime (&timestamp));
#endif
    fseek (ifp, save, SEEK_SET);
  }
}
/*
   Parse the plain-text header of a Rollei d530flex file: "KEY=value"
   lines terminated by an "EOHD" line.  Fills the image/thumb geometry,
   the data offsets and the capture timestamp.
 */
void CLASS parse_rollei()
{
  char line[128], *val;
  struct tm t;
  fseek (ifp, 0, SEEK_SET);
  memset (&t, 0, sizeof t);
  do {
    /* Stop on EOF or read error: fgets() leaves `line` unchanged on
       failure, so the original unchecked call could loop forever on a
       truncated header that never contains "EOHD". */
    if (!fgets (line, 128, ifp)) break;
    if ((val = strchr(line,'=')))
      *val++ = 0;
    else
      val = line + strlen(line);
    if (!strcmp(line,"DAT"))
      sscanf (val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
    if (!strcmp(line,"TIM"))
      sscanf (val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
    if (!strcmp(line,"HDR"))
      thumb_offset = atoi(val);
    if (!strcmp(line,"X "))
      raw_width = atoi(val);
    if (!strcmp(line,"Y "))
      raw_height = atoi(val);
    if (!strcmp(line,"TX "))
      thumb_width = atoi(val);
    if (!strcmp(line,"TY "))
      thumb_height = atoi(val);
  } while (strncmp(line,"EOHD",4));
  data_offset = thumb_offset + thumb_width * thumb_height * 2;
  t.tm_year -= 1900;		/* struct tm counts years from 1900 */
  t.tm_mon -= 1;		/* and months from 0 */
  if (mktime(&t) > 0)
    timestamp = mktime(&t);
  strcpy (make, "Rollei");
  strcpy (model,"d530flex");
  write_thumb = &CLASS rollei_thumb;
}
void CLASS parse_sinar_ia()
{
  /* Parse a Sinar IA file: a little-endian directory of named blocks
     (META/THUMB/RAW0) followed by camera identity and dimensions. */
  int entries, off;
  char str[8], *cp;
  order = 0x4949;
  fseek (ifp, 4, SEEK_SET);
  entries = get4();
  fseek (ifp, get4(), SEEK_SET);
  while (entries--) {
    off = get4(); get4();
    /* NOTE(review): assumes each 8-byte tag is NUL-terminated — verify. */
    fread (str, 8, 1, ifp);
    if (!strcmp(str,"META"))  meta_offset  = off;
    if (!strcmp(str,"THUMB")) thumb_offset = off;
    if (!strcmp(str,"RAW0"))  data_offset  = off;
  }
  fseek (ifp, meta_offset+20, SEEK_SET);
  fread (make, 64, 1, ifp);
  make[63] = 0;
  /* The field holds "Make Model"; split at the first space. */
  if ((cp = strchr(make,' '))) {
    strcpy (model, cp+1);
    *cp = 0;
  }
  raw_width  = get2();
  raw_height = get2();
  load_raw = &CLASS unpacked_load_raw;
  thumb_width  = (get4(),get2());	/* comma-read skips one word */
  thumb_height = get2();
  write_thumb = &CLASS ppm_thumb;
  maximum = 0x3fff;		/* 14-bit data */
}
void CLASS parse_phase_one (int base)
{
  /* Parse a Phase One "Raw" header: a table of (tag,type,len,data)
     entries describing geometry, black-level layout, color matrices
     and (in LibRaw builds) lens/body information. */
  unsigned entries, tag, type, len, data, save, i, c;
  float romm_cam[3][3];
  char *cp;
#ifdef LIBRAW_LIBRARY_BUILD
  char body_id[3];
  body_id[0] = 0;
#endif
  memset (&ph1, 0, sizeof ph1);
  fseek (ifp, base, SEEK_SET);
  order = get4() & 0xffff;
  if (get4() >> 8 != 0x526177) return;		/* "Raw" */
  fseek (ifp, get4()+base, SEEK_SET);
  entries = get4();
  get4();
  while (entries--) {
    tag  = get4();
    type = get4();
    len  = get4();
    data = get4();
    save = ftell(ifp);
    fseek (ifp, base+data, SEEK_SET);
    switch (tag) {
#ifdef LIBRAW_LIBRARY_BUILD
      case 0x0102:
        /* Body ID: three chars; an "LI" prefix collapses to two. */
        fread(body_id, 1, 3, ifp);
        if ((body_id[0] == 0x4c) && (body_id[1] == 0x49)) {
          body_id[1] = body_id[2];
        }
        unique_id = (((body_id[0] & 0x3f) << 5) | (body_id[1] & 0x3f)) - 0x41;
        setPhaseOneFeatures(unique_id);
        break;
      case 0x0401:
        /* Aperture as APEX value: Av = 2^(v/2). */
        if (type == 4) imgdata.lens.makernotes.CurAp = powf64(2.0f, (int_to_float(data)/2.0f));
        else imgdata.lens.makernotes.CurAp = powf64(2.0f, (getreal(type)/2.0f));
        break;
      case 0x0403:
        if (type == 4) imgdata.lens.makernotes.CurFocal = int_to_float(data);
        else imgdata.lens.makernotes.CurFocal = getreal(type);
        break;
      case 0x0410:
        fread(imgdata.lens.makernotes.body, 1, len, ifp);
        break;
      case 0x0412:
        fread(imgdata.lens.makernotes.Lens, 1, len, ifp);
        break;
      case 0x0414:
        if (type == 4) {
          imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (int_to_float(data)/2.0f));
        } else {
          imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, (getreal(type) / 2.0f));
        }
        break;
      case 0x0415:
        if (type == 4) {
          imgdata.lens.makernotes.MinAp4CurFocal = powf64(2.0f, (int_to_float(data)/2.0f));
        } else {
          imgdata.lens.makernotes.MinAp4CurFocal = powf64(2.0f, (getreal(type) / 2.0f));
        }
        break;
      case 0x0416:
        if (type == 4) {
          imgdata.lens.makernotes.MinFocal = int_to_float(data);
        } else {
          imgdata.lens.makernotes.MinFocal = getreal(type);
        }
        /* Implausibly large values are treated as "unknown". */
        if (imgdata.lens.makernotes.MinFocal > 1000.0f)
        {
          imgdata.lens.makernotes.MinFocal = 0.0f;
        }
        break;
      case 0x0417:
        if (type == 4) {
          imgdata.lens.makernotes.MaxFocal = int_to_float(data);
        } else {
          imgdata.lens.makernotes.MaxFocal = getreal(type);
        }
        break;
#endif
      case 0x100:  flip = "0653"[data & 3]-'0';  break;
      case 0x106:
        /* 3x3 color matrix, filled via flat indexing of row 0. */
        for (i=0; i < 9; i++)
          romm_cam[0][i] = getreal(11);
        romm_coeff (romm_cam);
        break;
      case 0x107:
        FORC3 cam_mul[c] = getreal(11);
        break;
      case 0x108:  raw_width     = data;	break;
      case 0x109:  raw_height    = data;	break;
      case 0x10a:  left_margin   = data;	break;
      case 0x10b:  top_margin    = data;	break;
      case 0x10c:  width         = data;	break;
      case 0x10d:  height        = data;	break;
      case 0x10e:  ph1.format    = data;	break;
      case 0x10f:  data_offset   = data+base;	break;
      case 0x110:  meta_offset   = data+base;
                   meta_length   = len;		break;
      case 0x112:  ph1.key_off   = save - 4;	break;
      case 0x210:  ph1.tag_210   = int_to_float(data);	break;
      case 0x21a:  ph1.tag_21a   = data;	break;
      case 0x21c:  strip_offset  = data+base;	break;
      case 0x21d:  ph1.t_black   = data;	break;
      case 0x222:  ph1.split_col = data;	break;
      case 0x223:  ph1.black_col = data+base;	break;
      case 0x224:  ph1.split_row = data;	break;
      case 0x225:  ph1.black_row = data+base;	break;
      case 0x301:
        /* Terminator set first survives the 63-byte read below. */
        model[63] = 0;
        fread (model, 1, 63, ifp);
        if ((cp = strstr(model," camera"))) *cp = 0;
    }
    fseek (ifp, save, SEEK_SET);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  /* No body ID in the main table: look for tag 0x0407 in the meta block. */
  if (!imgdata.lens.makernotes.body[0] && !body_id[0]) {
    fseek (ifp, meta_offset, SEEK_SET);
    order = get2();
    fseek (ifp, 6, SEEK_CUR);
    fseek (ifp, meta_offset+get4(), SEEK_SET);
    entries = get4();  get4();
    while (entries--) {
      tag  = get4();
      len  = get4();
      data = get4();
      save = ftell(ifp);
      fseek (ifp, meta_offset+data, SEEK_SET);
      if (tag == 0x0407) {
        fread(body_id, 1, 3, ifp);
        if ((body_id[0] == 0x4c) && (body_id[1] == 0x49)) {
          body_id[1] = body_id[2];
        }
        unique_id = (((body_id[0] & 0x3f) << 5) | (body_id[1] & 0x3f)) - 0x41;
        setPhaseOneFeatures(unique_id);
      }
      fseek (ifp, save, SEEK_SET);
    }
  }
#endif
  load_raw = ph1.format < 3 ?
      &CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
  maximum = 0xffff;
  strcpy (make, "Phase One");
  if (model[0]) return;
  /* Fall back to identifying the back by its sensor height. */
  switch (raw_height) {
    case 2060: strcpy (model,"LightPhase");	break;
    case 2682: strcpy (model,"H 10");		break;
    case 4128: strcpy (model,"H 20");		break;
    case 5488: strcpy (model,"H 25");		break;
  }
}
void CLASS parse_fuji (int offset)
{
  /* Parse the Fuji RAF tag table at `offset`: dimensions, layout,
     X-Trans CFA pattern and white balance. */
  unsigned entries, tag, len, save, c;
  fseek (ifp, offset, SEEK_SET);
  entries = get4();
  if (entries > 255) return;	/* sanity cap */
  while (entries--) {
    tag = get2();
    len = get2();
    save = ftell(ifp);
    if (tag == 0x100) {
      raw_height = get2();
      raw_width  = get2();
    } else if (tag == 0x121) {
      height = get2();
      if ((width = get2()) == 4284) width += 3;	/* known model quirk */
    } else if (tag == 0x130) {
      fuji_layout = fgetc(ifp) >> 7;
      fuji_width = !(fgetc(ifp) & 8);
    } else if (tag == 0x131) {
      /* X-Trans: 36-entry CFA pattern stored in reverse order. */
      filters = 9;
      FORC(36) xtrans_abs[0][35-c] = fgetc(ifp) & 3;
    } else if (tag == 0x2ff0) {
      FORC4 cam_mul[c ^ 1] = get2();
    } else if (tag == 0xc000) {
      /* This block is always little-endian; restore order afterwards. */
      c = order;
      order = 0x4949;
      if ((tag = get4()) > 10000) tag = get4();
      width  = tag;
      height = get4();
      order = c;
    }
    fseek (ifp, save+len, SEEK_SET);
  }
  /* fuji_layout doubles height / halves width when set. */
  height <<= fuji_layout;
  width  >>= fuji_layout;
}
int CLASS parse_jpeg (int offset)
{
  /* Walk JPEG markers from `offset` until start-of-scan, pulling the
     frame size from SOF segments and parsing any embedded CIFF ("HEAP")
     or TIFF metadata.  Returns 0 if there is no JPEG SOI at offset. */
  int len, save, hlen, mark;
  fseek (ifp, offset, SEEK_SET);
  if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8) return 0;	/* SOI */
  while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda) {
    order = 0x4d4d;		/* JPEG segment lengths are big-endian */
    len   = get2() - 2;
    save  = ftell(ifp);
    if (mark == 0xc0 || mark == 0xc3) {		/* SOF0 / SOF3 */
      fgetc(ifp);		/* skip sample-precision byte */
      raw_height = get2();
      raw_width  = get2();
    }
    order = get2();		/* embedded metadata sets its own order */
    hlen  = get4();
    if (get4() == 0x48454150)		/* "HEAP" = CIFF block */
    {
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
      parse_ciff (save+hlen, len-hlen, 0);
    }
    if (parse_tiff (save+6)) apply_tiff();
    fseek (ifp, save+len, SEEK_SET);
  }
  return 1;
}
void CLASS parse_riff()
{
  /* Recursively walk a RIFF/AVI container, looking for Nikon "nctg"
     tag lists and "IDIT" date strings to recover the timestamp. */
  unsigned i, size, end;
  char tag[4], date[64], month[64];
  static const char mon[12][4] =
  { "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec" };
  struct tm t;
  order = 0x4949;		/* RIFF is little-endian */
  fread (tag, 4, 1, ifp);
  size = get4();
  end = ftell(ifp) + size;
  if (!memcmp(tag,"RIFF",4) || !memcmp(tag,"LIST",4)) {
    int maxloop = 1000;		/* guard against malformed/looping chunks */
    get4();
    while (ftell(ifp)+7 < end && !feof(ifp) && maxloop--)
      parse_riff();
  } else if (!memcmp(tag,"nctg",4)) {
    while (ftell(ifp)+7 < end) {
      i = get2();
      size = get2();
      /* Tag pair 20/21 with 20-byte payload holds the timestamp. */
      if ((i+1) >> 1 == 10 && size == 20)
        get_timestamp(0);
      else fseek (ifp, size, SEEK_CUR);
    }
  } else if (!memcmp(tag,"IDIT",4) && size < 64) {
    fread (date, 64, 1, ifp);
    date[size] = 0;
    memset (&t, 0, sizeof t);
    /* Format: "Www Mmm dd hh:mm:ss yyyy". */
    if (sscanf (date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday,
        &t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6) {
      for (i=0; i < 12 && strcasecmp(mon[i],month); i++);
      t.tm_mon = i;
      t.tm_year -= 1900;
      if (mktime(&t) > 0)
        timestamp = mktime(&t);
    }
  } else
    fseek (ifp, size, SEEK_CUR);
}
/*
   Walk QuickTime atoms up to file position `end`, recursing into the
   container atoms ("moov", "udta", "CNTH") and parsing the JPEG found
   inside Canon's "CNDA" atom.
 */
void CLASS parse_qt (int end)
{
  unsigned atom_start, atom_size;
  char fourcc[4];

  order = 0x4d4d;		/* QuickTime data is big-endian */
  while (ftell(ifp)+7 < end) {
    atom_start = ftell(ifp);
    atom_size = get4();
    if (atom_size < 8) return;	/* atom header alone takes 8 bytes */
    fread (fourcc, 4, 1, ifp);
    /* Descend into container atoms. */
    if (!memcmp(fourcc,"moov",4) || !memcmp(fourcc,"udta",4) ||
        !memcmp(fourcc,"CNTH",4))
      parse_qt (atom_start + atom_size);
    /* Canon stores a metadata-bearing JPEG in "CNDA". */
    if (!memcmp(fourcc,"CNDA",4))
      parse_jpeg (ftell(ifp));
    fseek (ifp, atom_start + atom_size, SEEK_SET);
  }
}
/*
   Parse a SMaL header at `offset`.  The embedded size must match the
   actual file size (`fsize`) or the file is rejected.  Sets the image
   dimensions, a synthesized model string, and the version-specific
   raw loader.
 */
void CLASS parse_smal (int offset, int fsize)
{
  int version;

  fseek (ifp, offset+2, SEEK_SET);
  order = 0x4949;		/* little-endian */
  version = fgetc(ifp);
  if (version == 6)
    fseek (ifp, 5, SEEK_CUR);
  if (get4() != fsize) return;	/* stored size must equal file size */
  if (version > 6)
    data_offset = get4();
  raw_height = height = get2();
  raw_width  = width  = get2();
  strcpy (make, "SMaL");
  sprintf (model, "v%d %dx%d", version, width, height);
  switch (version) {
    case 6: load_raw = &CLASS smal_v6_load_raw; break;
    case 9: load_raw = &CLASS smal_v9_load_raw; break;
  }
}
void CLASS parse_cine()
{
  /* Parse a Phantom CINE file: fixed-layout header, image-info and
     setup blocks, then the per-frame offset table. */
  unsigned off_head, off_setup, off_image, i;
  order = 0x4949;
  fseek (ifp, 4, SEEK_SET);
  is_raw = get2() == 2;
  fseek (ifp, 14, SEEK_CUR);
  is_raw *= get4();		/* times frame count: zero disables raw */
  off_head  = get4();
  off_setup = get4();
  off_image = get4();
  timestamp = get4();
  if ((i = get4())) timestamp = i;	/* prefer the second stamp if set */
  fseek (ifp, off_head+4, SEEK_SET);
  raw_width  = get4();
  raw_height = get4();
  switch (get2(),get2()) {	/* comma-read: second word is bit depth */
    case  8: load_raw = &CLASS eight_bit_load_raw;  break;
    case 16: load_raw = &CLASS unpacked_load_raw;
  }
  fseek (ifp, off_setup+792, SEEK_SET);
  strcpy (make, "CINE");
  sprintf (model, "%d", get4());
  fseek (ifp, 12, SEEK_CUR);
  /* CFA code: only two Bayer layouts are supported. */
  switch ((i=get4()) & 0xffffff) {
    case  3: filters = 0x94949494;  break;
    case  4: filters = 0x49494949;  break;
    default: is_raw = 0;
  }
  fseek (ifp, 72, SEEK_CUR);
  /* Rotation angle mapped to dcraw flip codes. */
  switch ((get4()+3600) % 360) {
    case 270: flip = 4;  break;
    case 180: flip = 1;  break;
    case  90: flip = 7;  break;
    case   0: flip = 2;
  }
  cam_mul[0] = getreal(11);
  cam_mul[2] = getreal(11);
  maximum = ~((~0u) << get4());	/* mask of get4() significant bits */
  fseek (ifp, 668, SEEK_CUR);
  shutter = get4()/1000000000.0;	/* exposure stored in nanoseconds */
  fseek (ifp, off_image, SEEK_SET);
  if (shot_select < is_raw)
    fseek (ifp, shot_select*8, SEEK_CUR);
  /* 64-bit frame offset assembled from two 32-bit words. */
  data_offset  = (INT64) get4() + 8;
  data_offset += (INT64) get4() << 32;
}
void CLASS parse_redcine()
{
  /* Parse a RED R3D file.  Prefer the footer index ("REOB" in the last
     512-byte-aligned block); fall back to scanning chunks from the head
     for "REDV" frame records when the tail is missing. */
  unsigned i, len, rdvo;
  order = 0x4d4d;
  is_raw = 0;
  fseek (ifp, 52, SEEK_SET);
  width  = get4();
  height = get4();
  fseek (ifp, 0, SEEK_END);
  /* Back up to the last 512-byte boundary; the footer records its own
     distance from EOF there, followed by the "REOB" magic. */
  fseek (ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
  if (get4() != i || get4() != 0x52454f42) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
    fseek (ifp, 0, SEEK_SET);
    /* NOTE(review): len is unsigned, so "!= EOF" compares against
       0xffffffff — presumably get4()'s at-EOF value; confirm. */
    while ((len = get4()) != EOF) {
      if (get4() == 0x52454456)			/* "REDV" frame */
        if (is_raw++ == shot_select)
          data_offset = ftello(ifp) - 8;
      fseek (ifp, len-8, SEEK_CUR);
    }
  } else {
    rdvo = get4();		/* offset of the frame-offset table */
    fseek (ifp, 12, SEEK_CUR);
    is_raw = get4();
    fseeko (ifp, rdvo+8 + shot_select*4, SEEK_SET);
    data_offset = get4();
  }
}
//@end COMMON
/*
   Read a NUL-terminated string stored as 16-bit characters at `offset`,
   narrowing each character into `str` (at most len-1 of them).  Always
   NUL-terminates and returns `str`.
 */
char * CLASS foveon_gets (int offset, char *str, int len)
{
  int pos = 0;

  fseek (ifp, offset, SEEK_SET);
  while (pos < len-1) {
    str[pos] = get2();		/* narrow 16-bit char to 8 bits */
    if (!str[pos]) break;
    pos++;
  }
  str[pos] = 0;
  return str;
}
void CLASS parse_foveon()
{
  /* Parse a Sigma/Foveon X3F container: the section directory lives at
     the file's tail and describes images (IMAG/IMA2), calibration data
     (CAMF) and UTF-16 text properties (PROP). */
  int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
  char name[64], value[64];
  order = 0x4949;			/* Little-endian */
  fseek (ifp, 36, SEEK_SET);
  flip = get4();
  fseek (ifp, -4, SEEK_END);
  fseek (ifp, get4(), SEEK_SET);	/* last word points at the directory */
  if (get4() != 0x64434553) return;	/* SECd */
  entries = (get4(),get4());
  while (entries--) {
    off = get4();
    len = get4();
    tag = get4();
    save = ftell(ifp);
    fseek (ifp, off, SEEK_SET);
    /* Each section starts with "SEC " combined with its own tag byte. */
    if (get4() != (0x20434553 | (tag << 24))) return;
    switch (tag) {
      case 0x47414d49:			/* IMAG */
      case 0x32414d49:			/* IMA2 */
        fseek (ifp, 8, SEEK_CUR);
        pent = get4();
        wide = get4();
        high = get4();
        if (wide > raw_width && high > raw_height) {
          /* Largest image seen so far becomes the raw frame. */
          switch (pent) {
            case  5: load_flags = 1;	/* falls through to SD loader */
            case  6: load_raw = &CLASS foveon_sd_load_raw;  break;
            case 30: load_raw = &CLASS foveon_dp_load_raw;  break;
            default: load_raw = 0;
          }
          raw_width  = wide;
          raw_height = high;
          data_offset = off+28;
          is_foveon = 1;
        }
        fseek (ifp, off+28, SEEK_SET);
        /* A JPEG SOI here marks a usable embedded thumbnail. */
        if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8
            && thumb_length < len-28) {
          thumb_offset = off+28;
          thumb_length = len-28;
          write_thumb = &CLASS jpeg_thumb;
        }
        /* Otherwise the second image serves as the thumbnail. */
        if (++img == 2 && !thumb_length) {
          thumb_offset = off+24;
          thumb_width = wide;
          thumb_height = high;
          write_thumb = &CLASS foveon_thumb;
        }
        break;
      case 0x464d4143:			/* CAMF */
        meta_offset = off+8;
        meta_length = len-28;
        break;
      case 0x504f5250:			/* PROP */
        /* pent name/value offset pairs, then the string pool. */
        pent = (get4(),get4());
        fseek (ifp, 12, SEEK_CUR);
        off += pent*8 + 24;
        if ((unsigned) pent > 256) pent=256;	/* clamp to poff[] capacity */
        for (i=0; i < pent*2; i++)
          poff[0][i] = off + get4()*2;	/* flat-index fill of poff[256][2] */
        for (i=0; i < pent; i++) {
          foveon_gets (poff[i][0], name, 64);
          foveon_gets (poff[i][1], value, 64);
          if (!strcmp (name, "ISO"))
            iso_speed = atoi(value);
          if (!strcmp (name, "CAMMANUF"))
            strcpy (make, value);
          if (!strcmp (name, "CAMMODEL"))
            strcpy (model, value);
          if (!strcmp (name, "WB_DESC"))
            strcpy (model2, value);
          if (!strcmp (name, "TIME"))
            timestamp = atoi(value);
          if (!strcmp (name, "EXPTIME"))
            shutter = atoi(value) / 1000000.0;	/* microseconds */
          if (!strcmp (name, "APERTURE"))
            aperture = atof(value);
          if (!strcmp (name, "FLENGTH"))
            focal_len = atof(value);
#ifdef LIBRAW_LIBRARY_BUILD
          if (!strcmp (name, "FLEQ35MM"))
            imgdata.lens.makernotes.FocalLengthIn35mmFormat = atof(value);
          if (!strcmp (name, "LENSARANGE"))
          {
            /* "min max" pair; a single value means fixed aperture. */
            char *sp;
            imgdata.lens.makernotes.MaxAp4CurFocal = imgdata.lens.makernotes.MinAp4CurFocal = atof(value);
            sp = strrchr (value, ' ');
            if (sp)
            {
              imgdata.lens.makernotes.MinAp4CurFocal = atof(sp);
              if (imgdata.lens.makernotes.MaxAp4CurFocal > imgdata.lens.makernotes.MinAp4CurFocal)
                my_swap (float, imgdata.lens.makernotes.MaxAp4CurFocal, imgdata.lens.makernotes.MinAp4CurFocal);
            }
          }
          if (!strcmp (name, "LENSFRANGE"))
          {
            char *sp;
            imgdata.lens.makernotes.MinFocal = imgdata.lens.makernotes.MaxFocal = atof(value);
            sp = strrchr (value, ' ');
            if (sp)
            {
              imgdata.lens.makernotes.MaxFocal = atof(sp);
              if ((imgdata.lens.makernotes.MaxFocal + 0.17f) < imgdata.lens.makernotes.MinFocal)
                my_swap (float, imgdata.lens.makernotes.MaxFocal, imgdata.lens.makernotes.MinFocal);
            }
          }
          if (!strcmp (name, "LENSMODEL"))
          {
            imgdata.lens.makernotes.LensID = atoi(value);
            if (imgdata.lens.makernotes.LensID)
              imgdata.lens.makernotes.LensMount = Sigma_X3F;
          }
        }
/* NOTE(review): the brace/#ifdef interplay here is delicate — the `}`
   above closes the property loop only in LIBRAW builds; verify the
   preprocessed output stays balanced for both configurations. */
#endif
        }
#ifdef LOCALTIME
        timestamp = mktime (gmtime (&timestamp));
#endif
    }
    fseek (ifp, save, SEEK_SET);
  }
}
//@out COMMON
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff (const char *t_make, const char *t_model
#ifdef LIBRAW_LIBRARY_BUILD
,int internal_only
#endif
)
{
static const struct {
const char *prefix;
int t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0,
{ 9805,-2689,-1312,-5803,13064,3068,-2438,3075,8775 } },
{ "Canon EOS D60", 0, 0xfa0,
{ 6188,-1341,-890,-7168,14489,2937,-2640,3228,8483 } },
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D", 0, 0x3c82,
{ 8621,-2197,-787,-3150,11358,912,-1161,2400,4836 } },
{ "Canon EOS 7D Mark II", 0, 0x3510,
{ 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 10D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3bc7,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0,
{ 8197,-2000,-1118,-6714,14335,2592,-2536,3178,8266 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS 1200D", 0, 0x37c2,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20,
{ 4374,3631,-1743,-7520,15212,2472,-2892,3632,8161 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon EOS C500", 853, 0, /* DJC */
{ 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0,
{ 14130,-8071,127,2199,6528,1551,3402,-1721,4960 } },
{ "Canon PowerShot G1 X Mark II", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0,
{ -4778,9467,2172,4743,-1141,4344,-5146,9908,6077,-1566,11051,557 } },
{ "Canon PowerShot G2", 0, 0,
{ 9087,-2693,-1049,-6715,14382,2537,-2291,2819,7790 } },
{ "Canon PowerShot G3", 0, 0,
{ 9212,-2781,-1073,-6573,14189,2605,-2300,2844,7664 } },
{ "Canon PowerShot G5", 0, 0,
{ 9757,-2872,-933,-5972,13861,2301,-1622,2328,7212 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G7 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0,
{ -4155,9818,1529,3939,-25,4522,-5521,9870,6610,-2238,10873,1342 } },
{ "Canon PowerShot Pro90", 0, 0,
{ -4963,9896,2235,4642,-987,4294,-5162,10011,5859,-1770,11230,577 } },
{ "Canon PowerShot S30", 0, 0,
{ 10566,-3652,-1129,-6552,14662,2006,-2197,2581,7670 } },
{ "Canon PowerShot S40", 0, 0,
{ 8510,-2487,-940,-6869,14231,2900,-2318,2829,9013 } },
{ "Canon PowerShot S45", 0, 0,
{ 8163,-2333,-955,-6682,14174,2751,-2077,2597,8041 } },
{ "Canon PowerShot S50", 0, 0,
{ 8882,-2571,-863,-6348,14234,2288,-1516,2172,6569 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0, /* LibRaw */
{ 10800,-4782,-628,-2057,10783,1176,-802,2091,4739 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot SX60 HS", 0, 0,
{ 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0,
{ 11044,-3888,-1120,-7248,15168,2208,-1531,2277,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S1", 0, 0,
{ 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S20", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S2Pro", 128, 0,
{ 12492,-4690,-1402,-7033,15423,1647,-1507,2111,7697 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS2", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm F900EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100T", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X30", 0, 0,
{ 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-A1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2", 0, 0,
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 13193,-6685,-425,-2229,10458,1534,-878,1763,5217 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-T1", 0, 0, /* LibRaw */
{ 12066,-5927,-367,-1969,9878,1503,-721,2034,5453 } },
{ "Fujifilm XQ1", 0, 0,
{ 14305,-7365,-687,-3117,12383,432,-287,1660,4361 } },
{ "Hasselblad Lunar", -512, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Stellar", -800, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Hasselblad CFV", 0, 0, /* Adobe */
{ 8519, -3260, -280, -5081, 13459, 1738, -1449, 2960, 7809, } },
{ "Hasselblad H-16MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-22MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{"Hasselblad H-31MP",0, 0, /* LibRaw */
{ 14480,-5448,-1686,-3534,13123,2260,384,2952,7232 } },
{"Hasselblad H-39MP",0, 0, /* Adobe */
{3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718}},
{ "Hasselblad H3D-50", 0, 0, /* Adobe */
{3857,452, -46, -6008, 14477, 1596, -2627, 4481, 5718}},
{"Hasselblad H4D-40",0, 0, /* LibRaw */
{ 6325,-860,-957,-6559,15945,266,167,770,5936 } },
{"Hasselblad H4D-50",0, 0, /* LibRaw */
{ 15283,-6272,-465,-2030,16031,478,-2379,390,7965 } },
{"Hasselblad H4D-60",0, 0, /* Adobe */
{9662, -684, -279, -4903, 12293, 2950, -344, 1669, 6024}},
{"Hasselblad H5D-50c",0, 0, /* Adobe */
{4932, -835, 141, -4878, 11868, 3437, -1138, 1961, 7067}},
{"Hasselblad H5D-50",0, 0, /* Adobe */
{5656, -659, -346, -3923, 12306, 1791, -1602, 3509, 5442}},
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", -8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", -8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", -178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", -177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", -177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", -176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", -173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf CMost", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Credo 40", 0, 0,
{ 8035, 435, -962, -6001, 13872, 2320, -1159, 3065, 5434 } },
{ "Leaf Credo 50", 0, 0,
{ 3984, 0, 0, 0, 10000, 0, 0, 0, 7666 } },
{ "Leaf Credo 60", 0, 0,
{ 8035, 435, -962, -6001, 13872,2320,-1159,3065,5434} },
{ "Leaf Credo 80", 0, 0,
{ 6294, 686, -712, -5435, 13417, 2211, -1006, 2435, 5042} },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d,
{ 8983,-2942,-963,-6556,14476,2237,-2426,2887,8014 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d,
{ 11368,-3894,-1242,-6521,14358,2339,-2475,3056,7285 } },
{ "Minolta DiMAGE 7", 0, 0xf7d,
{ 9144,-2777,-998,-6676,14556,2281,-2470,3019,7744 } },
{ "Minolta DiMAGE A1", 0, 0xf8b,
{ 9274,-2547,-1167,-8220,16323,1943,-2273,2720,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0,
{ 7577,-2166,-926,-7454,15592,1934,-2377,2808,8606 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5710,-901,-615,-8594,16617,2024,-2975,4120,6830 } },
{ "Nikon D2X", 0, 0,
{ 10231,-2769,-1255,-8301,15900,2552,-797,680,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D3300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4S", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D4", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon Df", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D5300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D5500", 0, 0, /* DJC */
{ 5765,-2176,184,-3736,9072,4664,-1028,2213,9259 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{"Nikon D610",0, 0,
{ 10426,-4005,-444,-3565,11764,1403,-1206,2266,6549 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D750", 0, 0,
{ 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D810", 0, 0,
{ 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711} },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5400", 0, 0,
{ 9349,-2987,-1001,-7919,15766,2266,-2098,2680,6839 } },
{ "Nikon E5700", 0, 0,
{ -5368,11478,2368,5537,-113,3148,-4969,10021,5782,778,9028,211 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX P330", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P340", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", -3200, 0, /* LibRaw */
{ 13443,-6418,-673,-1309,10025,1131,-462,1827,4782 } },
{ "Nikon 1 V3", -200, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J4", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 S2", 200, 0,
{ 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0,
{ 8144,-2671,-473,-1740,9834,1601,-58,1971,4296 } },
{ "Nikon 1 AW1", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus C5050", 0, 0,
{ 10508,-3124,-1273,-6079,14294,1901,-1653,2306,6237 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc,
{ 12745,-4500,-1416,-6062,14542,1580,-1934,2256,6603 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc,
{ 13173,-4732,-1499,-5807,14036,1895,-2045,2452,7142 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL6", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL7", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M10", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M1", 0, 0,
{ 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } },
{ "Olympus E-M5MarkII", 0, 0, /* DJC */
{ 6617,-2589,139,-2917,8499,4419,-884,1913,6829 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{"Olympus STYLUS1",0, 0,
{ 11976,-5518,-545,-1419,10472,846,-475,1766,4524 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision", 0, 0, /* DJC */
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } },
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax K10D", 0, 0,
{ 9566,-2863,-803,-7170,15172,2112,-818,803,9705 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-m", 0, 0,
{ 11057,-3604,-1155,-5152,13046,2329,-282,375,8104 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-3", 0, 0,
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax K-S1", 0, 0,
{ 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } },
{ "Pentax MX-1", 0, 0,
{ 8804,-2523,-1238,-2423,11627,860,-682,1774,4753 } },
{ "Pentax Q10", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Panasonic DMC-CM1", -15, 0,
{ 8770, -3194,-820,-2871,11281,1803,-513,1552,4434} },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", -15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", -15, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", -15, 0,
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-FZ7", -15, 0,
{ 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", -15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LX100", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX (Typ 109)", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Panasonic DMC-LF1", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C (Typ 112)", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-Lux (Typ 109)", 0, 0xf7f, /* LibRaw */
{ 10031,-4555,-456,-3024,11520,1091,-1342,2611,4752 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ1000", -15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Leica V-LUX (Typ 114)", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Panasonic DMC-FZ100", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ200", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", -15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", -15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", -15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", -15, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", -15, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", -15, 0xfff,
{ 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } },
{ "Panasonic DMC-GF1", -15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", -15, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", -15, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", -15, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", -15, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GF7", -15, 0, /* DJC */
{ 6086,-2691,-18,-4207,9767,4441,-1486,2640,7441 } },
{ "Panasonic DMC-GH1", -15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", -15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", -15, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GH4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic DMC-GM1", -15, 0,
{ 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } },
{ "Panasonic DMC-GM5", -15, 0,
{ 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } },
{ "Panasonic DMC-GX1", -15, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{"Panasonic DMC-GX7", -15,0, /* LibRaw */
{7541,-2355,-591,-3163,10598,1894,-933,2109,5006}},
{"Panasonic DMC-TZ6",-15, 0,
{ 15964,-8332,-389,1756,7198,383,862,784,1995 } },
{"Panasonic DMC-ZS4",-15, 0,
{ 15964,-8332,-389,1756,7198,383,862,784,1995 } },
{ "Panasonic DMC-TZ7",-15, 0,
{ 7901,-2472,-600,-3298,10720,2210,-864,2205,5064 } },
{ "Panasonic DMC-ZS5",-15, 0, /* same ID as Panasonic DMC-TZ70 */
{ 7901,-2472,-600,-3298,10720,2210,-864,2205,5064 } },
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{"Phase One IQ250",0, 0,
{ 4396,-153,-249,-5267,12249,2657,-1397,2323,6014 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P 30", 0, 0,
{ 4516,-245,-37,-7020,14976,2173,-3206,4671,7087 } },
{ "Phase One P 45", 0, 0,
{ 5053,-24,-117,-5684,14076,1702,-2619,4492,5849 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Samsung EK-GN120", 0, 0, /* Adobe; Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung NX mini", 0, 0,
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung NX3000", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX30", 0, 0, /* NX30, NX300, NX300M */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* NX20, NX200, NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX11", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX10", 0, 0, /* also NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX5", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX1", -128, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX-1", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung GX20", 0, 0, /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{"Sigma dp1 Quattro",2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{"Sigma dp2 Quattro",2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma SD9", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD10", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD14", 15, 16383, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
{ "Sigma SD15", 15, 4095, /* LibRaw */
{ 14082,-2201,-1056,-5243,14788,167,-121,196,8881 } },
// Merills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merill Versions)
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", -512, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{ "Sony DSC-RX100M", -800, 0, /* M2 and M3 */
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100", -800, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{"Sony DSC-RX10",0, 0,
{ 8562,-3595,-385,-2715,11089,1128,-1023,2081,4400 } },
{ "Sony DSC-RX1R", -512, 0,
{ 8195,-2800,-422,-4261,12273,1709,-1505,2400,5624 } },
{ "Sony DSC-RX1", -512, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", -512, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", -512, 0xfeb,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A500", -512, 0xfeb,
{ 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } },
{ "Sony DSLR-A5", -512, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", -512, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", -512, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", -512, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{ "Sony ILCA-77M2", -512, 0,
{ 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } },
{ "Sony ILCE-7M2", -512, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-7S", -512, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7R", -512, 0,
{ 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } },
{ "Sony ILCE-7", -512, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE", -512, 0, /* 3000, 5000, 5100, 6000, and QX1 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5N", -512, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", -512, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-5T", -512, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", -512, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", -512, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", -512, 0, /* Adobe */
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", -512, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", -512, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX", -512, 0, /* NEX-C3, NEX-F3 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", -512, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", -512, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", -512, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", -512, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", -512, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", -512, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", -512, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", -512, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", -512, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
double cam_xyz[4][3];
char name[130];
int i, j;
int bl4=(cblack[0]+cblack[1]+cblack[2]+cblack[3])/4,bl64=0;
if(cblack[4]*cblack[5]>0)
{
for (unsigned c = 0; c < 4096 && c < cblack[4]*cblack[5]; c++)
bl64+=cblack[c+6];
bl64 /= cblack[4]*cblack[5];
}
int rblack = black+bl4+bl64;
sprintf (name, "%s %s", t_make, t_model);
for (i=0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix))) {
if (table[i].t_black>0)
{
black = (ushort) table[i].t_black;
memset(cblack,0,sizeof(cblack));
}
else if(table[i].t_black <0 && rblack == 0 )
{
black = (ushort) (-table[i].t_black);
memset(cblack,0,sizeof(cblack));
}
if (table[i].t_maximum) maximum = (ushort) table[i].t_maximum;
if (table[i].trans[0]) {
for (raw_color = j=0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
if(internal_only)
imgdata.color.cam_xyz[0][j] = table[i].trans[j] / 10000.0;
else
imgdata.color.cam_xyz[0][j] =
#endif
cam_xyz[0][j] = table[i].trans[j] / 10000.0;
#ifdef LIBRAW_LIBRARY_BUILD
if(!internal_only)
#endif
cam_xyz_coeff (rgb_cam, cam_xyz);
}
break;
}
}
/*
Load one of a small set of hard-coded conversion matrices into the global
rgb_cam[][] for cameras whose color math is fixed rather than derived from
a cam_xyz matrix. `index` selects a row of the static table below; each
row holds up to 3 x `colors` coefficients, stored row-major as
table[index][i*colors+c]. Setting raw_color = 0 marks the color data as
no longer "raw" (a real conversion matrix is now in effect).
NOTE(review): rgb_cam is presumably the camera->RGB conversion used later
in the pipeline — confirm against the callers of this function.
*/
void CLASS simple_coeff (int index)
{
static const float table[][12] = {
/* index 0 -- all Foveon cameras */
{ 1.4032,-0.2231,-0.1016,-0.5263,1.4816,0.017,-0.0112,0.0183,0.9113 },
/* index 1 -- Kodak DC20 and DC25 (4-color row: 3x4 coefficients) */
{ 2.25,0.75,-1.75,-0.25,-0.25,0.75,0.75,-0.25,-0.25,-1.75,0.75,2.25 },
/* index 2 -- Logitech Fotoman Pixtura */
{ 1.893,-0.418,-0.476,-0.495,1.773,-0.278,-1.017,-0.655,2.672 },
/* index 3 -- Nikon E880, E900, and E990 */
{ -1.936280, 1.800443, -1.448486, 2.584324,
1.405365, -0.524955, -0.289090, 0.408680,
-1.204965, 1.082304, 2.941367, -1.818705 }
};
int i, c;
/* FORCC binds c over the active channel count (`colors`), so a 3- or
4-color camera picks up the right stride into the flat table row. */
for (raw_color = i=0; i < 3; i++)
FORCC rgb_cam[i][c] = table[index][i*colors+c];
}
/*
Guess the endianness of a stream of 16-bit samples read from ifp.
Heuristic: decode each sample both ways (high byte first vs. low byte
first) and accumulate the squared difference between samples two
positions apart in a 4-entry ring buffer. Image data varies smoothly,
so the interpretation with the smaller accumulated difference is taken
as correct. Returns 0x4d4d ("MM", big-endian) or 0x4949 ("II",
little-endian), matching the TIFF byte-order tags used elsewhere.
Consumes `words` 16-bit values from the current position of ifp.
*/
short CLASS guess_byte_order (int words)
{
  uchar ring[4][2];
  int slot = 2, hi;
  double err[2] = {0,0};

  /* Prime the ring with the first two samples. */
  fread (ring[0], 2, 2, ifp);
  for (words -= 2; words--; ) {
    fread (ring[slot], 2, 1, ifp);
    for (hi = 0; hi < 2; hi++) {
      /* Compare this sample against the one two slots back, decoding
         both under the same byte-order hypothesis `hi`. */
      double past = ring[slot^2][hi] << 8 | ring[slot^2][!hi];
      double here = ring[slot  ][hi] << 8 | ring[slot  ][!hi];
      double d = past - here;
      err[hi] += d*d;
    }
    slot = (slot+1) & 3;
  }
  return err[0] < err[1] ? 0x4d4d : 0x4949;
}
/*
Estimate the green-channel balance between the two interleaved raw rows
found at file offsets off0 and off1, by unpacking one row of `width`
samples from each (bps bits per sample, fetched in `bite`-bit chunks)
and comparing diagonally adjacent pixels. Returns 100*log of the ratio
of the two accumulated absolute-difference sums.
NOTE(review): the caller presumably uses the sign/magnitude to pick a
green multiplier — confirm at the call sites.
Fix: img[] is a fixed 2x2064 stack array but the loops run to the
global `width`; a wider raster previously overran the stack (the known
dcraw find_green overflow). Bail out with a neutral 0.f in that case,
matching the upstream LibRaw hardening.
*/
float CLASS find_green (int bps, int bite, int off0, int off1)
{
UINT64 bitbuf=0;
int vbits, col, i, c;
ushort img[2][2064];
double sum[]={0,0};
/* Guard the fixed-size buffer: refuse rasters wider than img[] can
   hold instead of smashing the stack. */
if (width > 2064) return 0.f;
FORC(2) {
fseek (ifp, c ? off1:off0, SEEK_SET);
/* Bit-unpack one row: top up bitbuf `bite` bits at a time until at
   least bps bits are available, then extract the top bps bits. */
for (vbits=col=0; col < width; col++) {
for (vbits -= bps; vbits < 0; vbits += bite) {
bitbuf <<= bite;
for (i=0; i < bite; i+=8)
bitbuf |= (unsigned) (fgetc(ifp) << i);
}
img[c][col] = bitbuf << (64-bps-vbits) >> (64-bps);
}
}
/* Accumulate diagonal differences; even/odd columns feed the two sums
   so each sum tracks one of the two green phases. */
FORC(width-1) {
sum[ c & 1] += ABS(img[0][c]-img[1][c+1]);
sum[~c & 1] += ABS(img[1][c]-img[0][c+1]);
}
return 100 * log(sum[0]/sum[1]);
}
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
static const short pana[][6] = {
{ 3130, 1743, 4, 0, -6, 0 },
{ 3130, 2055, 4, 0, -6, 0 },
{ 3130, 2319, 4, 0, -6, 0 },
{ 3170, 2103, 18, 0,-42, 20 },
{ 3170, 2367, 18, 13,-42,-21 },
{ 3177, 2367, 0, 0, -1, 0 },
{ 3304, 2458, 0, 0, -1, 0 },
{ 3330, 2463, 9, 0, -5, 0 },
{ 3330, 2479, 9, 0,-17, 4 },
{ 3370, 1899, 15, 0,-44, 20 },
{ 3370, 2235, 15, 0,-44, 20 },
{ 3370, 2511, 15, 10,-44,-21 },
{ 3690, 2751, 3, 0, -8, -3 },
{ 3710, 2751, 0, 0, -3, 0 },
{ 3724, 2450, 0, 0, 0, -2 },
{ 3770, 2487, 17, 0,-44, 19 },
{ 3770, 2799, 17, 15,-44,-19 },
{ 3880, 2170, 6, 0, -6, 0 },
{ 4060, 3018, 0, 0, 0, -2 },
{ 4290, 2391, 3, 0, -8, -1 },
{ 4330, 2439, 17, 15,-44,-19 },
{ 4508, 2962, 0, 0, -3, -4 },
{ 4508, 3330, 0, 0, -3, -6 },
};
static const ushort canon[][11] = {
{ 1944, 1416, 0, 0, 48, 0 },
{ 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 },
{ 2224, 1456, 48, 6, 0, 2 },
{ 2376, 1728, 12, 6, 52, 2 },
{ 2672, 1968, 12, 6, 44, 2 },
{ 3152, 2068, 64, 12, 0, 0, 16 },
{ 3160, 2344, 44, 12, 4, 4 },
{ 3344, 2484, 4, 6, 52, 6 },
{ 3516, 2328, 42, 14, 0, 0 },
{ 3596, 2360, 74, 12, 0, 0 },
{ 3744, 2784, 52, 12, 8, 12 },
{ 3944, 2622, 30, 18, 6, 2 },
{ 3948, 2622, 42, 18, 0, 2 },
{ 3984, 2622, 76, 20, 0, 2, 14 },
{ 4104, 3048, 48, 12, 24, 12 },
{ 4116, 2178, 4, 2, 0, 0 },
{ 4152, 2772, 192, 12, 0, 0 },
{ 4160, 3124, 104, 11, 8, 65 },
{ 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 },
{ 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 },
{ 4312, 2876, 22, 18, 0, 2 },
{ 4352, 2874, 62, 18, 0, 0 },
{ 4476, 2954, 90, 34, 0, 0 },
{ 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 },
{ 4480, 3366, 80, 50, 0, 0 },
{ 4496, 3366, 80, 50, 12, 0 },
{ 4768, 3516, 96, 16, 0, 0, 0, 16 },
{ 4832, 3204, 62, 26, 0, 0 },
{ 4832, 3228, 62, 51, 0, 0 },
{ 5108, 3349, 98, 13, 0, 0 },
{ 5120, 3318, 142, 45, 62, 0 },
{ 5280, 3528, 72, 52, 0, 0 },
{ 5344, 3516, 142, 51, 0, 0 },
{ 5344, 3584, 126,100, 0, 2 },
{ 5360, 3516, 158, 51, 0, 0 },
{ 5568, 3708, 72, 38, 0, 0 },
{ 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 },
{ 5712, 3774, 62, 20, 10, 2 },
{ 5792, 3804, 158, 51, 0, 0 },
{ 5920, 3950, 122, 80, 2, 0 },
};
static const struct {
ushort id;
char t_model[20];
} unique[] = {
{ 0x001, "EOS-1D" },
{ 0x167, "EOS-1DS" },
{ 0x168, "EOS 10D" },
{ 0x169, "EOS-1D Mark III" },
{ 0x170, "EOS 300D" },
{ 0x174, "EOS-1D Mark II" },
{ 0x175, "EOS 20D" },
{ 0x176, "EOS 450D" },
{ 0x188, "EOS-1Ds Mark II" },
{ 0x189, "EOS 350D" },
{ 0x190, "EOS 40D" },
{ 0x213, "EOS 5D" },
{ 0x215, "EOS-1Ds Mark III" },
{ 0x218, "EOS 5D Mark II" },
{ 0x232, "EOS-1D Mark II N" },
{ 0x234, "EOS 30D" },
{ 0x236, "EOS 400D" },
{ 0x250, "EOS 7D" },
{ 0x252, "EOS 500D" },
{ 0x254, "EOS 1000D" },
{ 0x261, "EOS 50D" },
{ 0x269, "EOS-1D X" },
{ 0x270, "EOS 550D" },
{ 0x281, "EOS-1D Mark IV" },
{ 0x285, "EOS 5D Mark III" },
{ 0x286, "EOS 600D" },
{ 0x287, "EOS 60D" },
{ 0x288, "EOS 1100D" },
{ 0x289, "EOS 7D Mark II" },
{ 0x301, "EOS 650D" },
{ 0x302, "EOS 6D" },
{ 0x324, "EOS-1D C" },
{ 0x325, "EOS 70D" },
{ 0x326, "EOS 700D" },
{ 0x327, "EOS 1200D" },
{ 0x331, "EOS M" },
{ 0x335, "EOS M2" },
{ 0x346, "EOS 100D" },
{ 0x347, "EOS 760D" },
{ 0x382, "EOS 5DS" },
{ 0x393, "EOS 750D" },
{ 0x401, "EOS 5DS R" },
}, sonique[] = {
{ 0x002, "DSC-R1" }, { 0x100, "DSLR-A100" },
{ 0x101, "DSLR-A900" }, { 0x102, "DSLR-A700" },
{ 0x103, "DSLR-A200" }, { 0x104, "DSLR-A350" },
{ 0x105, "DSLR-A300" },
{262,"DSLR-A900"},
{263,"DSLR-A380"},
{ 0x108, "DSLR-A330" },
{ 0x109, "DSLR-A230" }, { 0x10a, "DSLR-A290" },
{ 0x10d, "DSLR-A850" },
{270,"DSLR-A850"},
{ 0x111, "DSLR-A550" },
{ 0x112, "DSLR-A500" }, { 0x113, "DSLR-A450" },
{ 0x116, "NEX-5" }, { 0x117, "NEX-3" },
{ 0x118, "SLT-A33" }, { 0x119, "SLT-A55V" },
{ 0x11a, "DSLR-A560" }, { 0x11b, "DSLR-A580" },
{ 0x11c, "NEX-C3" }, { 0x11d, "SLT-A35" },
{ 0x11e, "SLT-A65V" }, { 0x11f, "SLT-A77V" },
{ 0x120, "NEX-5N" }, { 0x121, "NEX-7" },
{290,"NEX-VG20E"},
{ 0x123, "SLT-A37" }, { 0x124, "SLT-A57" },
{ 0x125, "NEX-F3" }, { 0x126, "SLT-A99V" },
{ 0x127, "NEX-6" }, { 0x128, "NEX-5R" },
{ 0x129, "DSC-RX100" }, { 0x12a, "DSC-RX1" },
{299,"NEX-VG900"},
{300,"NEX-VG30E"},
{ 0x12e, "ILCE-3000" }, { 0x12f, "SLT-A58" },
{ 0x131, "NEX-3N" }, { 0x132, "ILCE-7" },
{ 0x133, "NEX-5T" }, { 0x134, "DSC-RX100M2" },
{ 0x135, "DSC-RX10" }, { 0x136, "DSC-RX1R" },
{ 0x137, "ILCE-7R" }, { 0x138, "ILCE-6000" },
{ 0x139, "ILCE-5000" }, { 0x13d, "DSC-RX100M3" },
{ 0x13e, "ILCE-7S" }, { 0x13f, "ILCA-77M2" },
{ 0x153, "ILCE-5100" }, { 0x154, "ILCE-7M2" },
{ 0x15a, "ILCE-QX1" },
};
static const struct {
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
} table[] = {
{ 786432,1024, 768, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-080C" },
{ 1447680,1392,1040, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-145C" },
{ 1920000,1600,1200, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-201C" },
{ 5067304,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C" },
{ 5067316,2588,1958, 0, 0, 0, 0, 0,0x94,0,0,"AVT","F-510C",12 },
{ 10134608,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C" },
{ 10134620,2588,1958, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-510C",12 },
{ 16157136,3272,2469, 0, 0, 0, 0, 9,0x94,0,0,"AVT","F-810C" },
{ 15980544,3264,2448, 0, 0, 0, 0, 8,0x61,0,1,"AgfaPhoto","DC-833m" },
{ 9631728,2532,1902, 0, 0, 0, 0,96,0x61,0,0,"Alcatel","5035D" },
// Android Raw dumps id start
// File Size in bytes Horizontal Res Vertical Flag then bayer order eg 0x16 bbgr 0x94 rggb
{ 16424960,4208,3120, 0, 0, 0, 0, 1,0x16,0,0,"Sony","IMX135-mipi 13mp" },
{ 17522688,4212,3120, 0, 0, 0, 0, 0,0x16,0,0,"Sony","IMX135-QCOM" },
{ 10223360,2608,1960, 0, 0, 0, 0, 1,0x94,0,0,"Sony","IMX072-mipi" },
{ 5107712,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"HTC","UltraPixel" },
{ 1540857,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","S3" },
{ 10223363,2688,1520, 0, 0, 0, 0, 1,0x61,0,0,"Samsung","GalaxyNexus" },
// Android Raw dumps id end
{ 2868726,1384,1036, 0, 0, 0, 0,64,0x49,0,8,"Baumer","TXG14",1078 },
{ 5298000,2400,1766,12,12,44, 2,40,0x94,0,2,"Canon","PowerShot SD300" },
{ 6553440,2664,1968, 4, 4,44, 4,40,0x94,0,2,"Canon","PowerShot A460" },
{ 6573120,2672,1968,12, 8,44, 0,40,0x94,0,2,"Canon","PowerShot A610" },
{ 6653280,2672,1992,10, 6,42, 2,40,0x94,0,2,"Canon","PowerShot A530" },
{ 7710960,2888,2136,44, 8, 4, 0,40,0x94,0,2,"Canon","PowerShot S3 IS" },
{ 9219600,3152,2340,36,12, 4, 0,40,0x94,0,2,"Canon","PowerShot A620" },
{ 9243240,3152,2346,12, 7,44,13,40,0x49,0,2,"Canon","PowerShot A470" },
{ 10341600,3336,2480, 6, 5,32, 3,40,0x94,0,2,"Canon","PowerShot A720 IS" },
{ 10383120,3344,2484,12, 6,44, 6,40,0x94,0,2,"Canon","PowerShot A630" },
{ 12945240,3736,2772,12, 6,52, 6,40,0x94,0,2,"Canon","PowerShot A640" },
{ 15636240,4104,3048,48,12,24,12,40,0x94,0,2,"Canon","PowerShot A650" },
{ 15467760,3720,2772, 6,12,30, 0,40,0x94,0,2,"Canon","PowerShot SX110 IS" },
{ 15534576,3728,2778,12, 9,44, 9,40,0x94,0,2,"Canon","PowerShot SX120 IS" },
{ 18653760,4080,3048,24,12,24,12,40,0x94,0,2,"Canon","PowerShot SX20 IS" },
{ 19131120,4168,3060,92,16, 4, 1,40,0x94,0,2,"Canon","PowerShot SX220 HS" },
{ 21936096,4464,3276,25,10,73,12,40,0x16,0,2,"Canon","PowerShot SX30 IS" },
{ 24724224,4704,3504, 8,16,56, 8,40,0x49,0,2,"Canon","PowerShot A3300 IS" },
{ 1976352,1632,1211, 0, 2, 0, 1, 0,0x94,0,1,"Casio","QV-2000UX" },
{ 3217760,2080,1547, 0, 0,10, 1, 0,0x94,0,1,"Casio","QV-3*00EX" },
{ 6218368,2585,1924, 0, 0, 9, 0, 0,0x94,0,1,"Casio","QV-5700" },
{ 7816704,2867,2181, 0, 0,34,36, 0,0x16,0,1,"Casio","EX-Z60" },
{ 2937856,1621,1208, 0, 0, 1, 0, 0,0x94,7,13,"Casio","EX-S20" },
{ 4948608,2090,1578, 0, 0,32,34, 0,0x94,7,1,"Casio","EX-S100" },
{ 6054400,2346,1720, 2, 0,32, 0, 0,0x94,7,1,"Casio","QV-R41" },
{ 7426656,2568,1928, 0, 0, 0, 0, 0,0x94,0,1,"Casio","EX-P505" },
{ 7530816,2602,1929, 0, 0,22, 0, 0,0x94,7,1,"Casio","QV-R51" },
{ 7542528,2602,1932, 0, 0,32, 0, 0,0x94,7,1,"Casio","EX-Z50" },
{ 7562048,2602,1937, 0, 0,25, 0, 0,0x16,7,1,"Casio","EX-Z500" },
{ 7753344,2602,1986, 0, 0,32,26, 0,0x94,7,1,"Casio","EX-Z55" },
{ 9313536,2858,2172, 0, 0,14,30, 0,0x94,7,1,"Casio","EX-P600" },
{ 10834368,3114,2319, 0, 0,27, 0, 0,0x94,0,1,"Casio","EX-Z750" },
{ 10843712,3114,2321, 0, 0,25, 0, 0,0x94,0,1,"Casio","EX-Z75" },
{ 10979200,3114,2350, 0, 0,32,32, 0,0x94,7,1,"Casio","EX-P700" },
{ 12310144,3285,2498, 0, 0, 6,30, 0,0x94,0,1,"Casio","EX-Z850" },
{ 12489984,3328,2502, 0, 0,47,35, 0,0x94,0,1,"Casio","EX-Z8" },
{ 15499264,3754,2752, 0, 0,82, 0, 0,0x94,0,1,"Casio","EX-Z1050" },
{ 18702336,4096,3044, 0, 0,24, 0,80,0x94,7,1,"Casio","EX-ZR100" },
{ 7684000,2260,1700, 0, 0, 0, 0,13,0x94,0,1,"Casio","QV-4000" },
{ 787456,1024, 769, 0, 1, 0, 0, 0,0x49,0,0,"Creative","PC-CAM 600" },
{ 28829184,4384,3288, 0, 0, 0, 0,36,0x61,0,0,"DJI" },
{ 15151104,4608,3288, 0, 0, 0, 0, 0,0x94,0,0,"Matrix" },
{ 3840000,1600,1200, 0, 0, 0, 0,65,0x49,0,0,"Foculus","531C" },
{ 307200, 640, 480, 0, 0, 0, 0, 0,0x94,0,0,"Generic" },
{ 62464, 256, 244, 1, 1, 6, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 124928, 512, 244, 1, 1,10, 1, 0,0x8d,0,0,"Kodak","DC20" },
{ 1652736,1536,1076, 0,52, 0, 0, 0,0x61,0,0,"Kodak","DCS200" },
{ 4159302,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330" },
{ 4162462,2338,1779, 1,33, 1, 2, 0,0x94,0,0,"Kodak","C330",3160 },
{ 2247168,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 3370752,1232, 912, 0, 0,16, 0, 0,0x00,0,0,"Kodak","C330" },
{ 6163328,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603" },
{ 6166488,2864,2152, 0, 0, 0, 0, 0,0x94,0,0,"Kodak","C603",3160 },
{ 460800, 640, 480, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 9116448,2848,2134, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","C603" },
{ 12241200,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP" },
{ 12272756,4040,3030, 2, 0, 0,13, 0,0x49,0,0,"Kodak","12MP",31556 },
{ 18000000,4000,3000, 0, 0, 0, 0, 0,0x00,0,0,"Kodak","12MP" },
{ 614400, 640, 480, 0, 3, 0, 0,64,0x94,0,0,"Kodak","KAI-0340" },
{ 15360000,3200,2400, 0, 0, 0, 0,96,0x16,0,0,"Lenovo","A820" },
{ 3884928,1608,1207, 0, 0, 0, 0,96,0x16,0,0,"Micron","2010",3212 },
{ 1138688,1534, 986, 0, 0, 0, 0, 0,0x61,0,0,"Minolta","RD175",513 },
{ 1581060,1305, 969, 0, 0,18, 6, 6,0x1e,4,1,"Nikon","E900" },
{ 2465792,1638,1204, 0, 0,22, 1, 6,0x4b,5,1,"Nikon","E950" },
{ 2940928,1616,1213, 0, 0, 0, 7,30,0x94,0,1,"Nikon","E2100" },
{ 4771840,2064,1541, 0, 0, 0, 1, 6,0xe1,0,1,"Nikon","E990" },
{ 4775936,2064,1542, 0, 0, 0, 0,30,0x94,0,1,"Nikon","E3700" },
{ 5865472,2288,1709, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E4500" },
{ 5869568,2288,1710, 0, 0, 0, 0, 6,0x16,0,1,"Nikon","E4300" },
{ 7438336,2576,1925, 0, 0, 0, 1, 6,0xb4,0,1,"Nikon","E5000" },
{ 8998912,2832,2118, 0, 0, 0, 0,30,0x94,7,1,"Nikon","COOLPIX S6" },
{ 5939200,2304,1718, 0, 0, 0, 0,30,0x16,0,0,"Olympus","C770UZ" },
{ 3178560,2064,1540, 0, 0, 0, 0, 0,0x94,0,1,"Pentax","Optio S" },
{ 4841984,2090,1544, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S" },
{ 6114240,2346,1737, 0, 0,22, 0, 0,0x94,7,1,"Pentax","Optio S4" },
{ 10702848,3072,2322, 0, 0, 0,21,30,0x94,0,1,"Pentax","Optio 750Z" },
{ 13248000,2208,3000, 0, 0, 0, 0,13,0x61,0,0,"Pixelink","A782" },
{ 6291456,2048,1536, 0, 0, 0, 0,96,0x61,0,0,"RoverShot","3320AF" },
{ 311696, 644, 484, 0, 0, 0, 0, 0,0x16,0,8,"ST Micro","STV680 VGA" },
{ 16098048,3288,2448, 0, 0,24, 0, 9,0x94,0,1,"Samsung","S85" },
{ 16215552,3312,2448, 0, 0,48, 0, 9,0x94,0,1,"Samsung","S85" },
{ 20487168,3648,2808, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 24000000,4000,3000, 0, 0, 0, 0,13,0x94,5,1,"Samsung","WB550" },
{ 12582980,3072,2048, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 33292868,4080,4080, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 44390468,4080,5440, 0, 0, 0, 0,33,0x61,0,0,"Sinar","",68 },
{ 1409024,1376,1024, 0, 0, 1, 0, 0,0x49,0,0,"Sony","XCD-SX910CR" },
{ 2818048,1376,1024, 0, 0, 1, 0,97,0x49,0,0,"Sony","XCD-SX910CR" },
};
static const char *corp[] =
{ "AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm",
"Mamiya", "Minolta", "Motorola", "Kodak", "Konica", "Leica",
"Nikon", "Nokia", "Olympus", "Pentax", "Phase One", "Ricoh",
"Samsung", "Sigma", "Sinar", "Sony" };
char head[32], *cp;
int hlen, flen, fsize, zero_fsize=1, i, c;
struct jhead jh;
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset (tiff_ifd, 0, sizeof tiff_ifd);
memset (gpsdata, 0, sizeof gpsdata);
memset (cblack, 0, sizeof cblack);
memset (white, 0, sizeof white);
memset (mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i=0; i < 4; i++) {
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i=0; i < 0x10000; i++) curve[i] = i;
order = get2();
hlen = get4();
fseek (ifp, 0, SEEK_SET);
fread (head, 1, 32, ifp);
fseek (ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *) memmem (head, 32, (char*)"MMMM", 4)) ||
(cp = (char *) memmem (head, 32, (char*)"IIII", 4))) {
parse_phase_one (cp-head);
if (cp-head && parse_tiff(0)) apply_tiff();
} else if (order == 0x4949 || order == 0x4d4d) {
if (!memcmp (head+6,"HEAPCCDR",8)) {
data_offset = hlen;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff (hlen, flen-hlen, 0);
load_raw = &CLASS canon_load_raw;
} else if (parse_tiff(0)) apply_tiff();
} else if (!memcmp (head,"\xff\xd8\xff\xe1",4) &&
!memcmp (head+6,"Exif",4)) {
fseek (ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek (ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
} else if (!memcmp (head+25,"ARECOYK",7)) {
strcpy (make, "Contax");
strcpy (model,"N Digital");
fseek (ifp, 33, SEEK_SET);
get_timestamp(1);
fseek (ifp, 52, SEEK_SET);
switch (get4()) {
case 7: iso_speed = 25; break;
case 8: iso_speed = 32; break;
case 9: iso_speed = 40; break;
case 10: iso_speed = 50; break;
case 11: iso_speed = 64; break;
case 12: iso_speed = 80; break;
case 13: iso_speed = 100; break;
case 14: iso_speed = 125; break;
case 15: iso_speed = 160; break;
case 16: iso_speed = 200; break;
case 17: iso_speed = 250; break;
case 18: iso_speed = 320; break;
case 19: iso_speed = 400; break;
}
shutter = powf64(2.0f, (((float)get4())/8.0f)) / 16000.0f;
FORC4 cam_mul[c ^ (c >> 1)] = get4();
fseek (ifp, 88, SEEK_SET);
aperture = powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 112, SEEK_SET);
focal_len = get4();
#ifdef LIBRAW_LIBRARY_BUILD
fseek (ifp, 104, SEEK_SET);
imgdata.lens.makernotes.MaxAp4CurFocal = powf64(2.0f, ((float)get4())/16.0f);
fseek (ifp, 124, SEEK_SET);
fread(imgdata.lens.makernotes.Lens, 32, 1, ifp);
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Contax_N;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Contax_N;
#endif
} else if (!strcmp (head, "PXN")) {
strcpy (make, "Logitech");
strcpy (model,"Fotoman Pixtura");
} else if (!strcmp (head, "qktk")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
} else if (!strcmp (head, "qktn")) {
strcpy (make, "Apple");
strcpy (model,"QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
} else if (!memcmp (head,"FUJIFILM",8)) {
fseek (ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek (ifp, 92, SEEK_SET);
parse_fuji (get4());
if (thumb_offset > 120) {
fseek (ifp, 120, SEEK_SET);
is_raw += (i = get4()) && 1;
if (is_raw == 2 && shot_select)
parse_fuji (i);
}
load_raw = &CLASS unpacked_load_raw;
fseek (ifp, 100+28*(shot_select > 0), SEEK_SET);
parse_tiff (data_offset = get4());
parse_tiff (thumb_offset+12);
apply_tiff();
} else if (!memcmp (head,"RIFF",4)) {
fseek (ifp, 0, SEEK_SET);
parse_riff();
} else if (!memcmp (head+4,"ftypqt ",9)) {
fseek (ifp, 0, SEEK_SET);
parse_qt (fsize);
is_raw = 0;
} else if (!memcmp (head,"\0\001\0\001\0@",6)) {
fseek (ifp, 6, SEEK_SET);
fread (make, 1, 8, ifp);
fread (model, 1, 8, ifp);
fread (model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
} else if (!memcmp (head,"NOKIARAW",8)) {
strcpy (make, "NOKIA");
order = 0x4949;
fseek (ifp, 300, SEEK_SET);
data_offset = get4();
i = get4();
width = get2();
height = get2();
switch (tiff_bps = i*8 / (width * height)) {
case 8: load_raw = &CLASS eight_bit_load_raw; break;
case 10: load_raw = &CLASS nokia_load_raw;
}
raw_height = height + (top_margin = i / (width * tiff_bps/8) - height);
mask[0][3] = 1;
filters = 0x61616161;
} else if (!memcmp (head,"ARRI",4)) {
order = 0x4949;
fseek (ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy (make, "ARRI");
fseek (ifp, 668, SEEK_SET);
fread (model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
} else if (!memcmp (head,"XPDS",4)) {
order = 0x4949;
fseek (ifp, 0x800, SEEK_SET);
fread (make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek (ifp, 56, SEEK_CUR);
fread (model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
gamma_curve (0, 12.25, 1, 1023);
} else if (!memcmp (head+4,"RED1",4)) {
strcpy (make, "Red");
strcpy (model,"One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve (1/2.4, 12.92, 1, 4095);
filters = 0x49494949;
} else if (!memcmp (head,"DSC-Image",9))
parse_rollei();
else if (!memcmp (head,"PWAD",4))
parse_sinar_ia();
else if (!memcmp (head,"\0MRM",4))
parse_minolta(0);
else if (!memcmp (head,"FOVb",4))
{
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
parse_foveon();
else
#endif
parse_x3f();
#else
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
parse_foveon();
#endif
#endif
}
else if (!memcmp (head,"CI",2))
parse_cine();
else
for (zero_fsize=i=0; i < sizeof table / sizeof *table; i++)
if (fsize == table[i].fsize) {
strcpy (make, table[i].t_make );
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp(make, "Canon"))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
#endif
strcpy (model, table[i].t_model);
flip = table[i].flags >> 2;
zero_is_bad = table[i].flags & 2;
if (table[i].flags & 1)
parse_external_jpeg();
data_offset = table[i].offset;
raw_width = table[i].rw;
raw_height = table[i].rh;
left_margin = table[i].lm;
top_margin = table[i].tm;
width = raw_width - left_margin - table[i].rm;
height = raw_height - top_margin - table[i].bm;
filters = 0x1010101 * table[i].cf;
colors = 4 - !((filters & filters >> 1) & 0x5555);
load_flags = table[i].lf;
switch (tiff_bps = (fsize-data_offset)*8 / (raw_width*raw_height)) {
case 6:
load_raw = &CLASS minolta_rd175_load_raw; break;
case 8:
load_raw = &CLASS eight_bit_load_raw; break;
case 10:
if ((fsize-data_offset)/raw_height*3 >= raw_width*4) {
load_raw = &CLASS android_loose_load_raw; break;
} else if (load_flags & 1) {
load_raw = &CLASS android_tight_load_raw; break;
}
case 12:
load_flags |= 128;
load_raw = &CLASS packed_load_raw; break;
case 16:
order = 0x4949 | 0x404 * (load_flags & 1);
tiff_bps -= load_flags >> 4;
tiff_bps -= load_flags = load_flags >> 1 & 7;
load_raw = &CLASS unpacked_load_raw;
}
maximum = (1 << tiff_bps) - (1 << table[i].max);
}
if (zero_fsize) fsize = 0;
if (make[0] == 0) parse_smal (0, flen);
if (make[0] == 0) {
parse_jpeg(0);
fseek(ifp,0,SEEK_END);
int sz = ftell(ifp);
if (!(strncmp(model,"ov",2) && strncmp(model,"RP_OV",5)) && sz>=6404096 &&
!fseek (ifp, -6404096, SEEK_END) &&
fread (head, 1, 32, ifp) && !strcmp(head,"BRCMn")) {
strcpy (make, "OmniVision");
data_offset = ftell(ifp) + 0x8000-32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
} else is_raw = 0;
}
for (i=0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr (make, corp[i])) /* Simplify company names */
strcpy (make, corp[i]);
if ((!strcmp(make,"Kodak") || !strcmp(make,"Leica")) &&
((cp = strcasestr(model," DIGITAL CAMERA")) ||
(cp = strstr(model,"FILE VERSION"))))
*cp = 0;
if (!strncasecmp(model,"PENTAX",6))
strcpy (make, "Pentax");
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ') *cp = 0;
cp = model + strlen(model);
while (*--cp == ' ') *cp = 0;
i = strlen(make); /* Remove make from model */
if (!strncasecmp (model, make, i) && model[i++] == ' ')
memmove (model, model+i, 64-i);
if (!strncmp (model,"FinePix ",8))
strcpy (model, model+8);
if (!strncmp (model,"Digital Camera ",15))
strcpy (model, model+15);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw) goto notraw;
if (!height) height = raw_height;
if (!width) width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{ height = 2616; width = 3896; }
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{ height = 3124; width = 4688; filters = 0x16161616; }
if (width == 4352 && (!strcmp(model,"K-r") || !strcmp(model,"K-x")))
{ width = 4309; filters = 0x16161616; }
if (width >= 4960 && !strncmp(model,"K-5",3))
{ left_margin = 10; width = 4950; filters = 0x16161616; }
if (width == 4736 && !strcmp(model,"K-7"))
{ height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; }
if (width == 6080 && !strcmp(model,"K-3"))
{ left_margin = 4; width = 6040; }
if (width == 7424 && !strcmp(model,"645D"))
{ height = 5502; width = 7328; filters = 0x61616161; top_margin = 29;
left_margin = 48; }
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version) {
if (filters == UINT_MAX) filters = 0;
if (filters) is_raw = tiff_samples;
else colors = tiff_samples;
switch (tiff_compress) {
case 0: /* Compression not set, assuming uncompressed */
case 1: load_raw = &CLASS packed_dng_load_raw; break;
case 7: load_raw = &CLASS lossless_dng_load_raw; break;
case 34892: load_raw = &CLASS lossy_dng_load_raw; break;
default: load_raw = 0;
}
if (!strcmp(make, "Canon") && unique_id)
{
for (i = 0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
strcpy(model, unique[i].t_model);
break;
}
}
if (!strcasecmp(make, "Sony") && unique_id)
{
for (i = 0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
strcpy(model, sonique[i].t_model);
break;
}
}
goto dng_skip;
}
if (!strcmp(make,"Canon") && !fsize && tiff_bps != 15) {
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i=0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1]) {
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
mask[0][1] = canon[i][6];
mask[0][3] = -canon[i][7];
mask[1][1] = canon[i][8];
mask[1][3] = -canon[i][9];
if (canon[i][10]) filters = canon[i][10] * 0x01010101;
}
if ((unique_id | 0x20000) == 0x2720000) {
left_margin = 8;
top_margin = 16;
}
}
if (!strcmp(make,"Canon") && unique_id)
{
for (i=0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff ("Canon", unique[i].t_model);
strcpy(model,unique[i].t_model);
}
}
if (!strcasecmp(make,"Sony") && unique_id)
{
for (i=0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
adobe_coeff ("Sony", sonique[i].t_model);
strcpy(model,sonique[i].t_model);
}
}
if (!strcmp(make,"Nikon")) {
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model,"KAI-0340")
&& find_green (16, 16, 3840, 5120) < 25) {
height = 480;
top_margin = filters = 0;
strcpy (model,"C603");
}
if (is_foveon) {
if (height*2 < width) pixel_aspect = 0.5;
if (height > width) pixel_aspect = 2;
filters = 0;
#ifdef LIBRAW_DEMOSAIC_PACK_GPL2
if(!imgdata.params.force_foveon_x3f)
simple_coeff(0);
#endif
} else if (!strcmp(make,"Canon") && tiff_bps == 15) {
switch (width) {
case 3344: width -= 66;
case 3872: width -= 6;
}
if (height > width) SWAP(height,width);
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
} else if (!strcmp(model,"PowerShot 600")) {
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
} else if (!strcmp(model,"PowerShot A5") ||
!strcmp(model,"PowerShot A5 Zoom")) {
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256/235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot A50")) {
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
} else if (!strcmp(model,"PowerShot Pro70")) {
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
} else if (!strcmp(model,"PowerShot Pro90 IS") ||
!strcmp(model,"PowerShot G1")) {
colors = 4;
filters = 0xb4b4b4b4;
} else if (!strcmp(model,"PowerShot A610")) {
if (canon_s2is()) strcpy (model+10, "S2 IS");
} else if (!strcmp(model,"PowerShot SX220 HS")) {
mask[1][3] = -4;
top_margin=16;
left_margin = 92;
} else if (!strcmp(model,"PowerShot S120")) {
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3016;
mask[0][0] = top_margin = 31;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
} else if (!strcmp(model,"PowerShot G16")) {
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 29;
left_margin = 120;
width = raw_width-left_margin-48;
height = raw_height-top_margin-14;
} else if (!strcmp(model,"PowerShot SX50 HS")) {
top_margin = 17;
} else if (!strcmp(model,"EOS D2000C")) {
filters = 0x61616161;
black = curve[200];
} else if (!strcmp(model,"D1")) {
cam_mul[0] *= 256/527.0;
cam_mul[2] *= 256/317.0;
} else if (!strcmp(model,"D1X")) {
width -= 4;
pixel_aspect = 0.5;
} else if (!strcmp(model,"D40X") ||
!strcmp(model,"D60") ||
!strcmp(model,"D80") ||
!strcmp(model,"D3000")) {
height -= 3;
width -= 4;
} else if (!strcmp(model,"D3") ||
!strcmp(model,"D3S") ||
!strcmp(model,"D700")) {
width -= 4;
left_margin = 2;
} else if (!strcmp(model,"D3100")) {
width -= 28;
left_margin = 6;
} else if (!strcmp(model,"D5000") ||
!strcmp(model,"D90")) {
width -= 42;
} else if (!strcmp(model,"D5100") ||
!strcmp(model,"D7000") ||
!strcmp(model,"COOLPIX A")) {
width -= 44;
} else if (!strcmp(model,"D3200") ||
!strncmp(model,"D6",2) ||
!strncmp(model,"D800",4)) {
width -= 46;
} else if (!strcmp(model,"D4") ||
!strcmp(model,"Df")) {
width -= 52;
left_margin = 2;
} else if (!strncmp(model,"D40",3) ||
!strncmp(model,"D50",3) ||
!strncmp(model,"D70",3)) {
width--;
} else if (!strcmp(model,"D100")) {
if (load_flags)
raw_width = (width += 3) + 3;
} else if (!strcmp(model,"D200")) {
left_margin = 1;
width -= 4;
filters = 0x94949494;
} else if (!strncmp(model,"D2H",3)) {
left_margin = 6;
width -= 14;
} else if (!strncmp(model,"D2X",3)) {
if (width == 3264) width -= 32;
else width -= 8;
} else if (!strncmp(model,"D300",4)) {
width -= 32;
} else if (!strcmp(make,"Nikon") && raw_width == 4032) {
if(!strcmp(model,"COOLPIX P7700"))
{
adobe_coeff ("Nikon","COOLPIX P7700");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P7800"))
{
adobe_coeff ("Nikon","COOLPIX P7800");
maximum = 65504;
load_flags = 0;
}
else if(!strcmp(model,"COOLPIX P340"))
load_flags=0;
} else if (!strncmp(model,"COOLPIX P",9) && raw_width != 4032) {
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && iso_speed >= 400)
black = 255;
} else if (!strncmp(model,"1 ",2)) {
height -= 2;
} else if (fsize == 1581060) {
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
} else if (fsize == 3178560) {
cam_mul[0] *= 4;
cam_mul[2] *= 4;
} else if (fsize == 4771840) {
if (!timestamp && nikon_e995())
strcpy (model, "E995");
if (strcmp(model,"E995")) {
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
} else if (fsize == 2940928) {
if (!timestamp && !nikon_e2100())
strcpy (model,"E2500");
if (!strcmp(model,"E2500")) {
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
} else if (fsize == 4775936) {
if (!timestamp) nikon_3700();
if (model[0] == 'E' && atoi(model+1) < 3700)
filters = 0x49494949;
if (!strcmp(model,"Optio 33WR")) {
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O') {
i = find_green (12, 32, 1188864, 3576832);
c = find_green (12, 32, 2383920, 2387016);
if (abs(i) < abs(c)) {
SWAP(i,c);
load_flags = 24;
}
if (i < 0) filters = 0x61616161;
}
} else if (fsize == 5869568) {
if (!timestamp && minolta_z2()) {
strcpy (make, "Minolta");
strcpy (model,"DiMAGE Z2");
}
load_flags = 6 + 24*(make[0] == 'M');
} else if (fsize == 6291456) {
fseek (ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d) {
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy (make, "ISG");
model[0] = 0;
}
} else if (!strcmp(make,"Fujifilm")) {
if (!strcmp(model+7,"S2Pro")) {
strcpy (model,"S2Pro");
height = 2144;
width = 2880;
flip = 6;
} else if (load_raw != &CLASS packed_load_raw)
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width ) >> 2 << 1;
if (width == 2848 || width == 3664) filters = 0x16161616;
if (width == 4032 || width == 4952) left_margin = 0;
if (width == 3328 && (width -= 66)) left_margin = 34;
if (width == 4936) left_margin = 4;
if (!strcmp(model,"HS50EXR") ||
!strcmp(model,"F900EXR")) {
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if(!strcmp(model,"S5500"))
{
height -= (top_margin=6);
}
if (fuji_layout) raw_width *= is_raw;
if (filters == 9)
FORC(36) xtrans[0][c] =
xtrans_abs[(c/6+top_margin) % 6][(c+left_margin) % 6];
} else if (!strcmp(model,"KD-400Z")) {
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
} else if (!strcmp(model,"KD-510Z")) {
goto konica_510z;
} else if (!strcasecmp(make,"Minolta")) {
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model,"DiMAGE A",8)) {
if (!strcmp(model,"DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"ALPHA",5) ||
!strncmp(model,"DYNAX",5) ||
!strncmp(model,"MAXXUM",6)) {
sprintf (model+20, "DYNAX %-10s", model+6+(model[0]=='M'));
adobe_coeff (make, model+20);
load_raw = &CLASS packed_load_raw;
} else if (!strncmp(model,"DiMAGE G",8)) {
if (model[8] == '4') {
height = 1716;
width = 2304;
} else if (model[8] == '5') {
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
} else if (model[8] == '6') {
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
} else if (!strcmp(model,"*ist D")) {
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
} else if (!strcmp(model,"*ist DS")) {
height -= 2;
} else if (!strcmp(make,"Samsung") && raw_width == 4704) {
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
} else if (!strcmp(make,"Samsung") && !strcmp(model,"NX3000")) {
top_margin = 24;
left_margin = 64;
width = 5472;
height = 3648;
filters = 0x61616161;
colors = 3;
} else if (!strcmp(make,"Samsung") && raw_height == 3714) {
height -= top_margin = 18;
left_margin = raw_width - (width = 5536);
if (raw_width != 5600)
left_margin = top_margin = 0;
filters = 0x61616161;
colors = 3;
} else if (!strcmp(make,"Samsung") && raw_width == 5632) {
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12) load_flags = 80;
} else if (!strcmp(make,"Samsung") && raw_width == 5664) {
height -= top_margin = 17;
left_margin = 96;
width = 5544;
filters = 0x49494949;
} else if (!strcmp(make,"Samsung") && raw_width == 6496) {
filters = 0x61616161;
} else if (!strcmp(model,"EX1")) {
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682) {
height -= 10;
width -= 46;
top_margin = 8;
}
} else if (!strcmp(model,"WB2000")) {
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718) {
height -= 28;
width -= 56;
top_margin = 8;
}
} else if (strstr(model,"WB550")) {
strcpy (model, "WB550");
} else if (!strcmp(model,"EX2F")) {
height = 3045;
width = 4070;
top_margin = 3;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
} else if (!strcmp(model,"STV680 VGA")) {
black = 16;
} else if (!strcmp(model,"N95")) {
height = raw_height - (top_margin = 2);
} else if (!strcmp(model,"640x480")) {
gamma_curve (0.45, 4.5, 1, 255);
} else if (!strcmp(make,"Hasselblad")) {
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262) {
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
if(!strcasecmp(model,"H3D"))
{
adobe_coeff("Hasselblad","H3DII-39");
strcpy(model,"H3DII-39");
}
} else if (raw_width == 7410 || raw_width == 8282) {
height -= 84;
width -= 82;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
adobe_coeff("Hasselblad","H4D-40");
strcpy(model,"H4D-40");
} else if (raw_width == 9044) {
if(black > 500)
{
top_margin = 12;
left_margin = 44;
width = 8956;
height = 6708;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H4D-60");
strcpy(model,"H4D-60");
black = 512;
}
else
{
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
strcpy(model,"H3DII-60");
}
} else if (raw_width == 4090) {
strcpy (model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
} else if (raw_width == 8282 && raw_height == 6240) {
if(!strcasecmp(model,"H5D"))
{
/* H5D 50*/
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
black = 256;
strcpy(model,"H5D-50");
}
else if(!strcasecmp(model,"H3D"))
{
black=0;
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
memset(cblack,0,sizeof(cblack));
adobe_coeff("Hasselblad","H3D-50");
strcpy(model,"H3D-50");
}
} else if (raw_width == 8374 && raw_height == 6304) {
/* H5D 50c*/
left_margin = 52;
top_margin = 100;
width = 8272;
height = 6200;
black = 256;
strcpy(model,"H5D-50c");
}
if (tiff_samples > 1) {
is_raw = tiff_samples+1;
if (!shot_select && !half_size) filters = 0;
}
} else if (!strcmp(make,"Sinar")) {
if (!load_raw) load_raw = &CLASS unpacked_load_raw;
if (is_raw > 1 && !shot_select && !half_size) filters = 0;
maximum = 0x3fff;
} else if (!strcmp(make,"Leaf")) {
maximum = 0x3fff;
fseek (ifp, data_offset, SEEK_SET);
if (ljpeg_start (&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1) filters = 0;
if (tiff_samples > 1 || tile_length < raw_height) {
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048) {
if (tiff_samples == 1) {
filters = 1;
strcpy (cdesc, "RBTG");
strcpy (model, "CatchLight");
top_margin = 8; left_margin = 18; height = 2032; width = 2016;
} else {
strcpy (model, "DCB2");
top_margin = 10; left_margin = 16; height = 2028; width = 2022;
}
} else if (width+height == 3144+2060) {
if (!model[0]) strcpy (model, "Cantare");
if (width > height) {
top_margin = 6; left_margin = 32; height = 2048; width = 3072;
filters = 0x61616161;
} else {
left_margin = 6; top_margin = 32; width = 2048; height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V') filters = 0;
else is_raw = tiff_samples;
} else if (width == 2116) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
} else if (width == 3171) {
strcpy (model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
} else if (!strcmp(make,"Leica") || !strcmp(make,"Panasonic")) {
if ((flen - data_offset) / (raw_width*8/7) == raw_height)
load_raw = &CLASS panasonic_load_raw;
if (!load_raw) {
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
}
zero_is_bad = 1;
if ((height += 12) > raw_height) height = raw_height;
for (i=0; i < sizeof pana / sizeof *pana; i++)
if (raw_width == pana[i][0] && raw_height == pana[i][1]) {
left_margin = pana[i][2];
top_margin = pana[i][3];
width += pana[i][4];
height += pana[i][5];
}
filters = 0x01010101 * (uchar) "\x94\x61\x49\x16"
[((filters-1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
} else if (!strcmp(model,"C770UZ")) {
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
} else if (!strcmp(make,"Olympus")) {
height += height & 1;
if (exif_cfa) filters = exif_cfa;
if (width == 4100) width -= 4;
if (width == 4080) width -= 24;
if (width == 9280) { width -= 6; height -= 6; }
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model,"E-300") ||
!strcmp(model,"E-500")) {
width -= 20;
if (load_raw == &CLASS unpacked_load_raw) {
maximum = 0xfc3;
memset (cblack, 0, sizeof cblack);
}
} else if (!strcmp(model,"STYLUS1")) {
width -= 14;
maximum = 0xfff;
} else if (!strcmp(model,"E-330")) {
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
} else if (!strcmp(model,"SP550UZ")) {
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
}
} else if (!strcmp(model,"N Digital")) {
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
} else if (!strcmp(model,"DSC-F828")) {
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy (cdesc, "RGBE");
} else if (!strcmp(model,"DSC-V3")) {
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
} else if (!strcmp(make,"Sony") && raw_width == 3984) {
width = 3925;
order = 0x4d4d;
} else if (!strcmp(make,"Sony") && raw_width == 4288) {
width -= 32;
} else if (!strcmp(make,"Sony") && raw_width == 4928) {
if (height < 3280) width -= 8;
} else if (!strcmp(make,"Sony") && raw_width == 5504) { // ILCE-3000//5000
width -= height > 3664 ? 8 : 32;
} else if (!strcmp(make,"Sony") && raw_width == 6048) {
width -= 24;
if (strstr(model,"RX1") || strstr(model,"A99"))
width -= 6;
} else if (!strcmp(make,"Sony") && raw_width == 7392) {
width -= 30;
} else if (!strcmp(model,"DSLR-A100")) {
if (width == 3880) {
height--;
width = ++raw_width;
} else {
height -= 4;
width -= 4;
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
} else if (!strcmp(model,"DSLR-A350")) {
height -= 4;
} else if (!strcmp(model,"PIXL")) {
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve (0, 7, 1, 255);
} else if (!strcmp(model,"C603") || !strcmp(model,"C330")
|| !strcmp(model,"12MP")) {
order = 0x4949;
if (filters && data_offset) {
fseek (ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET);
read_shorts (curve, 256);
} else gamma_curve (0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw :
strcmp(model,"C330") ? &CLASS kodak_c603_load_raw :
&CLASS kodak_c330_load_raw;
load_flags = tiff_bps > 16;
tiff_bps = 8;
} else if (!strncasecmp(model,"EasyShare",9)) {
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
} else if (!strcasecmp(make,"Kodak")) {
if (filters == UINT_MAX) filters = 0x61616161;
if (!strncmp(model,"NC2000",6) ||
!strncmp(model,"EOSDCS",6) ||
!strncmp(model,"DCS4",4)) {
width -= 4;
left_margin = 2;
if (model[6] == ' ') model[6] = 0;
if (!strcmp(model,"DCS460A")) goto bw;
} else if (!strcmp(model,"DCS660M")) {
black = 214;
goto bw;
} else if (!strcmp(model,"DCS760M")) {
bw: colors = 1;
filters = 0;
}
if (!strcmp(model+4,"20X"))
strcpy (cdesc, "MYCY");
if (strstr(model,"DC25")) {
strcpy (model, "DC25");
data_offset = 15424;
}
if (!strncmp(model,"DC2",3)) {
raw_height = 2 + (height = 242);
if (!strncmp(model, "DC290", 5))
iso_speed = 100;
if (!strncmp(model, "DC280", 5))
iso_speed = 70;
if (flen < 100000) {
raw_width = 256; width = 249;
pixel_aspect = (4.0*height) / (3.0*width);
} else {
raw_width = 512; width = 501;
pixel_aspect = (493.0*height) / (373.0*width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
} else if (!strcmp(model,"40")) {
strcpy (model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC50")) {
strcpy (model, "DC50");
height = 512;
width = 768;
iso_speed=84;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
} else if (strstr(model,"DC120")) {
strcpy (model, "DC120");
height = 976;
width = 848;
iso_speed=160;
pixel_aspect = height/0.75/width;
load_raw = tiff_compress == 7 ?
&CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
} else if (!strcmp(model,"DCS200")) {
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
iso_speed=140;
write_thumb = &CLASS layer_thumb;
black = 17;
}
} else if (!strcmp(model,"Fotoman Pixtura")) {
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
} else if (!strncmp(model,"QuickTake",9)) {
if (head[5]) strcpy (model+10, "200");
fseek (ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(),get2()) == 30 ? 738:736;
if (height > width) {
SWAP(height,width);
fseek (ifp, data_offset-6, SEEK_SET);
flip = ~get2() & 3 ? 5:6;
}
filters = 0x61616161;
} else if (!strcmp(make,"Rollei") && !load_raw) {
switch (raw_width) {
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model,"GRAS-50S5C")) {
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
} else if (!strcmp(model,"BB-500CL")) {
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"BB-500GE")) {
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
} else if (!strcmp(model,"SVS625CL")) {
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
return;
}
if (!model[0])
sprintf (model, "%dx%d", width, height);
if (filters == UINT_MAX) filters = 0x94949494;
if (thumb_offset && !thumb_height) {
fseek (ifp, thumb_offset, SEEK_SET);
if (ljpeg_start (&jh, 1)) {
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
if ((use_camera_matrix & (use_camera_wb || dng_version))
&& cmatrix[0][0] > 0.125) {
memcpy (rgb_cam, cmatrix, sizeof cmatrix);
raw_color = 0;
}
if (raw_color) adobe_coeff (make, model);
#ifdef LIBRAW_LIBRARY_BUILD
else if(imgdata.color.cam_xyz[0][0]<0.01)
adobe_coeff (make, model,1);
#endif
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color) adobe_coeff ("Apple","Quicktake");
if (fuji_width) {
fuji_width = width >> !fuji_layout;
if (~fuji_width & 1) filters = 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
} else {
if (raw_height < height) raw_height = height;
if (raw_width < width ) raw_width = width;
}
if (!tiff_bps) tiff_bps = 12;
if (!maximum)
{
maximum = (1 << tiff_bps) - 1;
if(maximum < 0x10000 && curve[maximum]>0 && load_raw == &CLASS sony_arw2_load_raw)
maximum = curve[maximum];
}
if (!load_raw || height < 22 || width < 22 ||
tiff_bps > 16 || tiff_samples > 6 || colors > 4)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw ||
load_raw == &CLASS lossy_dng_load_raw) {
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s: You must link dcraw with %s!!\n"),
ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy (cdesc, colors == 3 ? "RGBG":"GMCY");
if (!raw_height) raw_height = height;
if (!raw_width ) raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) |
(filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX) flip = tiff_flip;
if (flip == UINT_MAX) flip = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY,1,2);
#endif
}
//@end COMMON
//@out FILEIO
#ifndef NO_LCMS
/*
  Color-manage the interpolated image in place using LittleCMS.

  input  - path of the input ICC profile, or the literal string "embed"
           to use the profile embedded in the raw file
           (profile_offset / profile_length).
  output - path of the output ICC profile file; NULL selects a standard
           sRGB profile built by lcms.

  On success every pixel of `image` (RGBA, 16 bits per channel) is
  transformed from the input to the output profile, and raw_color is
  set so the camera color matrix is not applied afterwards.  A profile
  read from the `output` file is kept in the global `oprof` (not freed
  here) so it can later be embedded in the output image.
*/
void CLASS apply_profile (const char *input, const char *output)
{
char *prof;
cmsHPROFILE hInProfile=0, hOutProfile=0;
cmsHTRANSFORM hTransform;
FILE *fp;
unsigned size;
/* Open the input profile: either an external file, or the profile
   embedded in the raw file (when one was found during parsing). */
if (strcmp (input, "embed"))
hInProfile = cmsOpenProfileFromFile (input, "r");
else if (profile_length) {
#ifndef LIBRAW_LIBRARY_BUILD
/* dcraw build: read the embedded profile bytes from the raw file. */
prof = (char *) malloc (profile_length);
merror (prof, "apply_profile()");
fseek (ifp, profile_offset, SEEK_SET);
fread (prof, 1, profile_length, ifp);
hInProfile = cmsOpenProfileFromMem (prof, profile_length);
free (prof);
#else
/* LibRaw build: the embedded profile was already loaded into imgdata. */
hInProfile = cmsOpenProfileFromMem (imgdata.color.profile, profile_length);
#endif
} else
{
/* "embed" requested but the file carries no profile: warn and fall
   through; hInProfile stays 0 so we return below. */
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
fprintf (stderr,_("%s has no embedded profile.\n"), ifname);
#endif
}
if (!hInProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
return;
}
if (!output)
hOutProfile = cmsCreate_sRGBProfile();
else if ((fp = fopen (output, "rb"))) {
/* The first 4 bytes of an ICC profile hold its total size, big-endian;
   read it, then load the whole file into oprof.
   NOTE(review): fread return values are unchecked -- a short read
   would leave `size` or the buffer partially filled; confirm inputs
   are trusted here. */
fread (&size, 4, 1, fp);
fseek (fp, 0, SEEK_SET);
oprof = (unsigned *) malloc (size = ntohl(size));
merror (oprof, "apply_profile()");
fread (oprof, 1, size, fp);
fclose (fp);
if (!(hOutProfile = cmsOpenProfileFromMem (oprof, size))) {
free (oprof);
oprof = 0;
}
}
#ifdef DCRAW_VERBOSE
else
fprintf (stderr,_("Cannot open file %s!\n"), output);
#endif
if (!hOutProfile)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
goto quit;
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,0,2);
#endif
/* Transform all width*height pixels in place (16-bit RGBA layout).
   NOTE(review): hTransform is not checked for NULL before use --
   confirm cmsCreateTransform cannot fail for these fixed formats. */
hTransform = cmsCreateTransform (hInProfile, TYPE_RGBA_16,
hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
cmsDoTransform (hTransform, image, image, width*height);
raw_color = 1; /* Don't use rgb_cam with a profile */
cmsDeleteTransform (hTransform);
cmsCloseProfile (hOutProfile);
quit:
cmsCloseProfile (hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE,1,2);
#endif
}
#endif
//@end FILEIO
//@out COMMON
/*
  Convert the interpolated camera-space image to the user-selected
  output color space (output_color: 1=sRGB, 2=Adobe RGB, 3=WideGamut,
  4=ProPhoto, 5=XYZ -- see name[]), and hand-build a matching ICC
  profile in the global `oprof` for embedding in the output file.
  When raw_color ends up set, the pixel data is left in camera color
  (in the dcraw build the loop then only accumulates histograms).
*/
void CLASS convert_to_rgb()
{
#ifndef LIBRAW_LIBRARY_BUILD
int row, col, c;
#endif
int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
ushort *img;
float out[3];
#endif
float out_cam[3][4]; /* combined camera -> output-space matrix */
double num, inverse[3][3];
/* XYZ(D50) <- sRGB matrix, used to express each output space's
   primaries as XYZ values for the profile's r/g/bXYZ tags. */
static const double xyzd50_srgb[3][3] =
{ { 0.436083, 0.385083, 0.143055 },
{ 0.222507, 0.716888, 0.060608 },
{ 0.013930, 0.097097, 0.714022 } };
/* Output-space <- sRGB conversion matrices, one per output_color. */
static const double rgb_rgb[3][3] =
{ { 1,0,0 }, { 0,1,0 }, { 0,0,1 } };
static const double adobe_rgb[3][3] =
{ { 0.715146, 0.284856, 0.000000 },
{ 0.000000, 1.000000, 0.000000 },
{ 0.000000, 0.041166, 0.958839 } };
static const double wide_rgb[3][3] =
{ { 0.593087, 0.404710, 0.002206 },
{ 0.095413, 0.843149, 0.061439 },
{ 0.011621, 0.069091, 0.919288 } };
static const double prophoto_rgb[3][3] =
{ { 0.529317, 0.330092, 0.140588 },
{ 0.098368, 0.873465, 0.028169 },
{ 0.016879, 0.117663, 0.865457 } };
static const double (*out_rgb[])[3] =
{ rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb };
static const char *name[] =
{ "sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ" };
/* Template ICC profile header (32-bit words; converted to big-endian
   below).  0x61637370 = 'acsp' signature, oprof[4]/[5] = 'RGB '/'XYZ '
   color-space fields. */
static const unsigned phead[] =
{ 1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0, 0, 0,
0x61637370, 0, 0, 0x6e6f6e65, 0, 0, 0, 0, 0xf6d6, 0x10000, 0xd32d };
/* Tag table: count, then (signature, offset, size) triplets.
   Offsets are filled in by the layout loop below. */
unsigned pbody[] =
{ 10, 0x63707274, 0, 36, /* cprt */
0x64657363, 0, 40, /* desc */
0x77747074, 0, 20, /* wtpt */
0x626b7074, 0, 20, /* bkpt */
0x72545243, 0, 14, /* rTRC */
0x67545243, 0, 14, /* gTRC */
0x62545243, 0, 14, /* bTRC */
0x7258595a, 0, 20, /* rXYZ */
0x6758595a, 0, 20, /* gXYZ */
0x6258595a, 0, 20 }; /* bXYZ */
/* D50 white point for the wtpt tag. */
static const unsigned pwhite[] = { 0xf351, 0x10000, 0x116cc };
/* 'curv' tag template; [3] gets the gamma value patched in below. */
unsigned pcurve[] = { 0x63757276, 0, 1, 0x1000000 };
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,0,2);
#endif
gamma_curve (gamm[0], gamm[1], 0, 0); /* build output gamma from gamm[] */
memcpy (out_cam, rgb_cam, sizeof out_cam);
/* Skip conversion for monochrome data, document mode (dcraw build),
   or an out-of-range output_color selection. */
#ifndef LIBRAW_LIBRARY_BUILD
raw_color |= colors == 1 || document_mode ||
output_color < 1 || output_color > 5;
#else
raw_color |= colors == 1 ||
output_color < 1 || output_color > 5;
#endif
if (!raw_color) {
/* --- Build the output ICC profile in oprof --- */
oprof = (unsigned *) calloc (phead[0], 1);
merror (oprof, "convert_to_rgb()");
memcpy (oprof, phead, sizeof phead);
if (output_color == 5) oprof[4] = oprof[5]; /* XYZ output: color space = 'XYZ ' */
oprof[0] = 132 + 12*pbody[0]; /* data starts after header + tag table */
/* Lay out each tag's data area: write its type signature, record its
   offset, and advance by the size rounded up to 4 bytes. */
for (i=0; i < pbody[0]; i++) {
oprof[oprof[0]/4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
pbody[i*3+2] = oprof[0];
oprof[0] += (pbody[i*3+3] + 3) & -4;
}
memcpy (oprof+32, pbody, sizeof pbody);
oprof[pbody[5]/4+2] = strlen(name[output_color-1]) + 1; /* desc length */
memcpy ((char *)oprof+pbody[8]+8, pwhite, sizeof pwhite); /* wtpt */
pcurve[3] = (short)(256/gamm[5]+0.5) << 16; /* gamma as fixed-point */
for (i=4; i < 7; i++)
memcpy ((char *)oprof+pbody[i*3+2], pcurve, sizeof pcurve); /* r/g/bTRC */
/* r/g/bXYZ tags: primaries of the output space expressed in XYZ(D50),
   via xyzd50_srgb * inverse(output<-sRGB), as 16.16 fixed point. */
pseudoinverse ((double (*)[3]) out_rgb[output_color-1], inverse, 3);
for (i=0; i < 3; i++)
for (j=0; j < 3; j++) {
for (num = k=0; k < 3; k++)
num += xyzd50_srgb[i][k] * inverse[j][k];
oprof[pbody[j*3+23]/4+i+2] = num * 0x10000 + 0.5;
}
/* ICC profiles are big-endian throughout. */
for (i=0; i < phead[0]/4; i++)
oprof[i] = htonl(oprof[i]);
strcpy ((char *)oprof+pbody[2]+8, "auto-generated by dcraw");
strcpy ((char *)oprof+pbody[5]+12, name[output_color-1]);
/* Compose the final per-pixel matrix: out_cam = out_rgb * rgb_cam. */
for (i=0; i < 3; i++)
for (j=0; j < colors; j++)
for (out_cam[i][j] = k=0; k < 3; k++)
out_cam[i][j] += out_rgb[output_color-1][i][k] * rgb_cam[k][j];
}
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr, raw_color ? _("Building histograms...\n") :
_("Converting to %s colorspace...\n"), name[output_color-1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
convert_to_rgb_loop(out_cam);
#else
/* Apply out_cam to every pixel (clipped), or in document mode copy the
   pixel's own CFA channel; accumulate per-channel histograms. */
memset (histogram, 0, sizeof histogram);
for (img=image[0], row=0; row < height; row++)
for (col=0; col < width; col++, img+=4) {
if (!raw_color) {
out[0] = out[1] = out[2] = 0;
FORCC {
out[0] += out_cam[0][c] * img[c];
out[1] += out_cam[1][c] * img[c];
out[2] += out_cam[2][c] * img[c];
}
FORC3 img[c] = CLIP((int) out[c]);
}
else if (document_mode)
img[0] = img[fcol(row,col)];
FORCC histogram[c][img[c] >> 3]++;
}
#endif
/* Four-channel camera data collapses to three output channels. */
if (colors == 4 && output_color) colors = 3;
#ifndef LIBRAW_LIBRARY_BUILD
if (document_mode && filters) colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB,1,2);
#endif
}
/* Rotate a Fuji Super CCD image 45 degrees, resampling the diagonal
   sensor lattice onto a conventional rectangular grid with bilinear
   interpolation.  Replaces the global image[] buffer, updates
   width/height, and clears fuji_width so this runs only once. */
void CLASS fuji_rotate()
{
int i, row, col;
double step;
float r, c, fr, fc;
unsigned ur, uc;
ushort wide, high, (*img)[4], (*pix)[4];
/* non-Fuji rasters (fuji_width == 0) are left untouched */
if (!fuji_width) return;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf (stderr,_("Rotating image 45 degrees...\n"));
#endif
/* account for half-size decoding: shrink halves the coordinate grid */
fuji_width = (fuji_width - 1 + shrink) >> shrink;
step = sqrt(0.5); /* 1/sqrt(2): sample spacing along the diagonals */
wide = fuji_width / step;
high = (height - fuji_width) / step;
img = (ushort (*)[4]) calloc (high, wide*sizeof *img);
merror (img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,0,2);
#endif
for (row=0; row < high; row++)
for (col=0; col < wide; col++) {
/* map the output cell back to fractional source coordinates;
   ur/uc keep the integer parts, r/c the full values */
ur = r = fuji_width + (row-col)*step;
uc = c = (row+col)*step;
/* skip cells whose 2x2 interpolation window falls off the source */
if (ur > height-2 || uc > width-2) continue;
fr = r - ur;
fc = c - uc;
pix = image + ur*width + uc;
/* bilinear blend of the four neighboring source pixels, per channel */
for (i=0; i < colors; i++)
img[row*wide+col][i] =
(pix[ 0][i]*(1-fc) + pix[ 1][i]*fc) * (1-fr) +
(pix[width][i]*(1-fc) + pix[width+1][i]*fc) * fr;
}
free (image);
width = wide;
height = high;
image = img;
fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE,1,2);
#endif
}
/* Correct non-square pixels by stretching the image along one axis:
   pixel_aspect < 1 stretches vertically (adds rows), pixel_aspect > 1
   stretches horizontally (adds columns).  Each output sample is a
   linear interpolation between the two nearest source rows/columns.
   Replaces the global image[] buffer and updates height or width. */
void CLASS stretch()
{
ushort newdim, (*img)[4], *pix0, *pix1;
int row, col, c;
double rc, frac;
if (pixel_aspect == 1) return;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,0,2);
#endif
#ifdef DCRAW_VERBOSE
if (verbose) fprintf (stderr,_("Stretching the image...\n"));
#endif
if (pixel_aspect < 1) {
newdim = height / pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (width, newdim*sizeof *img);
merror (img, "stretch()");
/* rc tracks the fractional source row; note that `c` first holds its
   integer part, then is reused by FORCC as the channel index */
for (rc=row=0; row < newdim; row++, rc+=pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c*width];
/* pix1 is the next source row, unless we are at the bottom edge */
if (c+1 < height) pix1 += width*4;
for (col=0; col < width; col++, pix0+=4, pix1+=4)
FORCC img[row*width+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
height = newdim;
} else {
newdim = width * pixel_aspect + 0.5;
img = (ushort (*)[4]) calloc (height, newdim*sizeof *img);
merror (img, "stretch()");
/* same scheme as above, but interpolating between adjacent columns */
for (rc=col=0; col < newdim; col++, rc+=1/pixel_aspect) {
frac = rc - (c = rc);
pix0 = pix1 = image[c];
if (c+1 < width) pix1 += 4;
for (row=0; row < height; row++, pix0+=width*4, pix1+=width*4)
FORCC img[row*newdim+col][c] = pix0[c]*(1-frac) + pix1[c]*frac + 0.5;
}
width = newdim;
}
free (image);
image = img;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH,1,2);
#endif
}
/* Map an output (row,col) position to the corresponding index in the
   untransformed image[] buffer, honoring the global "flip" code:
   bit 2 = transpose, bit 1 = flip vertically, bit 0 = flip horizontally. */
int CLASS flip_index (int row, int col)
{
  int tmp;
  if (flip & 4) {
    tmp = row;
    row = col;
    col = tmp;
  }
  if (flip & 2) row = iheight - 1 - row;
  if (flip & 1) col = iwidth - 1 - col;
  return row * iwidth + col;
}
//@end COMMON
/* One 12-byte TIFF directory (IFD) entry.  Values that fit in four
   bytes are stored inline in the union (see tiff_set); larger values
   hold the file offset of the data, computed with TOFF. */
struct tiff_tag {
ushort tag, type;
int count;
union { char c[4]; short s[2]; int i; } val;
};
/* In-memory image of the complete TIFF/Exif header that dcraw writes
   verbatim to the output file: byte order + magic, a main IFD, an Exif
   sub-IFD and a GPS sub-IFD, followed by the data areas the tags point
   into (filled in by tiff_head).  Field order is the on-disk layout --
   do not reorder. */
struct tiff_hdr {
ushort t_order, magic; /* "II"/"MM" byte order, then TIFF magic 42 */
int ifd; /* offset of the first IFD (set to 10 in tiff_head) */
ushort pad, ntag;
struct tiff_tag tag[23]; /* main IFD entries */
int nextifd;
ushort pad2, nexif;
struct tiff_tag exif[4]; /* Exif sub-IFD entries */
ushort pad3, ngps;
struct tiff_tag gpst[10]; /* GPS sub-IFD entries */
short bps[4]; /* bits-per-sample values (tag 258) */
int rat[10]; /* rational values: resolution, shutter, aperture, focal */
unsigned gps[26]; /* raw GPS words copied from gpsdata[] */
char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64];
};
//@out COMMON
/* Append one directory entry to a TIFF IFD.  ntag points at the entry
   counter; the tiff_tag array immediately follows it in memory (see
   struct tiff_hdr).  Small BYTE/ASCII and SHORT values are packed
   inline in little-endian order; everything else is stored as-is. */
void CLASS tiff_set (ushort *ntag,
ushort tag, ushort type, int count, int val)
{
  struct tiff_tag *entry;
  int i;
  /* claim the next free slot right after the counter, then bump it */
  entry = (struct tiff_tag *)(ntag+1) + (*ntag)++;
  entry->tag = tag;
  entry->type = type;
  entry->count = count;
  if (type < 3 && count <= 4) {
    /* BYTE/ASCII data that fits in four bytes: pack inline */
    for (i=0; i < 4; i++)
      entry->val.c[i] = val >> (i << 3);
  } else if (type == 3 && count <= 2) {
    /* up to two SHORT values packed inline */
    for (i=0; i < 2; i++)
      entry->val.s[i] = val >> (i << 4);
  } else
    entry->val.i = val;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
/* Build the complete TIFF/Exif header in *th.  With full != 0 the main
   IFD also describes the image data (dimensions, strip offsets, ICC
   profile); with full == 0 only the orientation and metadata tags are
   written, as needed for a thumbnail header.  Fix: restores
   "localtime (&timestamp)", which had been garbled to "×tamp" by
   HTML-entity damage ("&times" decoded to the U+00D7 sign). */
void CLASS tiff_head (struct tiff_hdr *th, int full)
{
int c, psize=0;
struct tm *t;
memset (th, 0, sizeof *th);
/* write "II" on little-endian hosts, "MM" on big-endian ones */
th->t_order = htonl(0x4d4d4949) >> 16;
th->magic = 42;
th->ifd = 10;
if (full) {
tiff_set (&th->ntag, 254, 4, 1, 0);
tiff_set (&th->ntag, 256, 4, 1, width);
tiff_set (&th->ntag, 257, 4, 1, height);
tiff_set (&th->ntag, 258, 3, colors, output_bps);
/* more than two samples: bits-per-sample must point at th->bps */
if (colors > 2)
th->tag[th->ntag-1].val.i = TOFF(th->bps);
FORC4 th->bps[c] = output_bps;
tiff_set (&th->ntag, 259, 3, 1, 1);
tiff_set (&th->ntag, 262, 3, 1, 1 + (colors > 1));
}
tiff_set (&th->ntag, 270, 2, 512, TOFF(th->t_desc));
tiff_set (&th->ntag, 271, 2, 64, TOFF(th->t_make));
tiff_set (&th->ntag, 272, 2, 64, TOFF(th->t_model));
if (full) {
if (oprof) psize = ntohl(oprof[0]);
/* image data starts right after this header and the ICC profile */
tiff_set (&th->ntag, 273, 4, 1, sizeof *th + psize);
tiff_set (&th->ntag, 277, 3, 1, colors);
tiff_set (&th->ntag, 278, 4, 1, height);
tiff_set (&th->ntag, 279, 4, 1, height*width*colors*output_bps/8);
} else
/* map dcraw's flip code to the TIFF orientation value */
tiff_set (&th->ntag, 274, 3, 1, "12435867"[flip]-'0');
tiff_set (&th->ntag, 282, 5, 1, TOFF(th->rat[0]));
tiff_set (&th->ntag, 283, 5, 1, TOFF(th->rat[2]));
tiff_set (&th->ntag, 284, 3, 1, 1);
tiff_set (&th->ntag, 296, 3, 1, 2);
tiff_set (&th->ntag, 305, 2, 32, TOFF(th->soft));
tiff_set (&th->ntag, 306, 2, 20, TOFF(th->date));
tiff_set (&th->ntag, 315, 2, 64, TOFF(th->t_artist));
tiff_set (&th->ntag, 34665, 4, 1, TOFF(th->nexif));
if (psize) tiff_set (&th->ntag, 34675, 7, psize, sizeof *th);
tiff_set (&th->nexif, 33434, 5, 1, TOFF(th->rat[4]));
tiff_set (&th->nexif, 33437, 5, 1, TOFF(th->rat[6]));
tiff_set (&th->nexif, 34855, 3, 1, iso_speed);
tiff_set (&th->nexif, 37386, 5, 1, TOFF(th->rat[8]));
if (gpsdata[1]) {
tiff_set (&th->ntag, 34853, 4, 1, TOFF(th->ngps));
tiff_set (&th->ngps, 0, 1, 4, 0x202);
tiff_set (&th->ngps, 1, 2, 2, gpsdata[29]);
tiff_set (&th->ngps, 2, 5, 3, TOFF(th->gps[0]));
tiff_set (&th->ngps, 3, 2, 2, gpsdata[30]);
tiff_set (&th->ngps, 4, 5, 3, TOFF(th->gps[6]));
tiff_set (&th->ngps, 5, 1, 1, gpsdata[31]);
tiff_set (&th->ngps, 6, 5, 1, TOFF(th->gps[18]));
tiff_set (&th->ngps, 7, 5, 3, TOFF(th->gps[12]));
tiff_set (&th->ngps, 18, 2, 12, TOFF(th->gps[20]));
tiff_set (&th->ngps, 29, 2, 12, TOFF(th->gps[23]));
memcpy (th->gps, gpsdata, sizeof th->gps);
}
th->rat[0] = th->rat[2] = 300;
th->rat[1] = th->rat[3] = 1;
/* exposure rationals: value * 1e6 over 1e6 */
FORC(6) th->rat[4+c] = 1000000;
th->rat[4] *= shutter;
th->rat[6] *= aperture;
th->rat[8] *= focal_len;
strncpy (th->t_desc, desc, 512);
strncpy (th->t_make, make, 64);
strncpy (th->t_model, model, 64);
strcpy (th->soft, "dcraw v" DCRAW_VERSION);
t = localtime (&timestamp); /* fixed: was mojibake "×tamp" */
sprintf (th->date, "%04d:%02d:%02d %02d:%02d:%02d",
t->tm_year+1900,t->tm_mon+1,t->tm_mday,t->tm_hour,t->tm_min,t->tm_sec);
strncpy (th->t_artist, artist, 64);
}
#ifdef LIBRAW_LIBRARY_BUILD
/* Write a JPEG thumbnail to tfp, emitting a fresh SOI marker and,
   when the source JPEG does not already carry an Exif APP1 segment,
   inserting a minimal Exif/TIFF header before the thumbnail data.
   Fix: the APP1 template string must contain TWO placeholder bytes
   ("\xff\xe1  Exif\0\0") for the segment length; the collapsed
   single-space version let exif[1] overwrite the 'E' of "Exif". */
void CLASS jpeg_thumb_writer (FILE *tfp,char *t_humb,int t_humb_length)
{
ushort exif[5];
struct tiff_hdr th;
fputc (0xff, tfp);
fputc (0xd8, tfp);
/* offset 6 is where "Exif" sits in an existing APP1 marker */
if (strcmp (t_humb+6, "Exif")) {
/* bytes 2-3 (the two spaces) are replaced by the length below */
memcpy (exif, "\xff\xe1  Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, tfp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, tfp);
}
/* skip the thumbnail's own SOI marker (first two bytes) */
fwrite (t_humb+2, 1, t_humb_length-2, tfp);
}
/* Read the embedded JPEG thumbnail from ifp and hand it to
   jpeg_thumb_writer for output on ofp. */
void CLASS jpeg_thumb()
{
  char *buf = (char *) malloc (thumb_length);
  merror (buf, "jpeg_thumb()");
  fread (buf, 1, thumb_length, ifp);
  jpeg_thumb_writer (ofp, buf, thumb_length);
  free (buf);
}
#else
/* Copy the embedded JPEG thumbnail from ifp to ofp, prepending a
   minimal Exif/TIFF header when the thumbnail lacks one.
   Fix: the APP1 template string must contain TWO placeholder bytes
   ("\xff\xe1  Exif\0\0"); with the collapsed single space, the
   exif[1] length assignment clobbered the 'E' of "Exif". */
void CLASS jpeg_thumb()
{
char *thumb;
ushort exif[5];
struct tiff_hdr th;
thumb = (char *) malloc (thumb_length);
merror (thumb, "jpeg_thumb()");
fread (thumb, 1, thumb_length, ifp);
fputc (0xff, ofp);
fputc (0xd8, ofp);
/* offset 6 is where "Exif" sits in an existing APP1 marker */
if (strcmp (thumb+6, "Exif")) {
/* bytes 2-3 (the two spaces) are replaced by the length below */
memcpy (exif, "\xff\xe1  Exif\0\0", 10);
exif[1] = htons (8 + sizeof th);
fwrite (exif, 1, sizeof exif, ofp);
tiff_head (&th, 0);
fwrite (&th, 1, sizeof th, ofp);
}
/* skip the thumbnail's own SOI marker (first two bytes) */
fwrite (thumb+2, 1, thumb_length-2, ofp);
free (thumb);
}
#endif
/* Write the finished image to ofp as PGM/PPM/PAM or TIFF, applying the
   output gamma curve, optional auto-brightening, and the requested
   flip/rotation (via flip_index) on the way out. */
void CLASS write_ppm_tiff()
{
struct tiff_hdr th;
uchar *ppm;
ushort *ppm2;
int c, row, col, soff, rstep, cstep;
int perc, val, total, t_white=0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
perc = width * height * auto_bright_thr;
#else
perc = width * height * 0.01; /* 99th percentile white level */
#endif
if (fuji_width) perc /= 2;
/* auto-brighten: scan each channel's histogram downward until the
   clipped-pixel count exceeds perc; the largest such level becomes
   the white point */
if (!((highlight & ~2) || no_auto_bright))
for (t_white=c=0; c < colors; c++) {
for (val=0x2000, total=0; --val > 32; )
if ((total += histogram[c][val]) > perc) break;
if (t_white < val) t_white = val;
}
gamma_curve (gamm[0], gamm[1], 2, (t_white << 3)/bright);
iheight = height;
iwidth = width;
if (flip & 4) SWAP(height,width);
/* one output row of pixels; ppm2 aliases it for 16-bit output */
ppm = (uchar *) calloc (width, colors*output_bps/8);
ppm2 = (ushort *) ppm;
merror (ppm, "write_ppm_tiff()");
if (output_tiff) {
tiff_head (&th, 1);
fwrite (&th, sizeof th, 1, ofp);
if (oprof)
fwrite (oprof, ntohl(oprof[0]), 1, ofp);
} else if (colors > 3)
fprintf (ofp,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
width, height, colors, (1 << output_bps)-1, cdesc);
else
fprintf (ofp, "P%d\n%d %d\n%d\n",
colors/2+5, width, height, (1 << output_bps)-1);
/* walk the source buffer in output order: soff is the running source
   index, cstep/rstep the per-column and per-row strides implied by
   the flip code */
soff = flip_index (0, 0);
cstep = flip_index (0, 1) - soff;
rstep = flip_index (1, 0) - flip_index (0, width);
for (row=0; row < height; row++, soff += rstep) {
for (col=0; col < width; col++, soff += cstep)
if (output_bps == 8)
FORCC ppm [col*colors+c] = curve[image[soff][c]] >> 8;
else FORCC ppm2[col*colors+c] = curve[image[soff][c]];
/* 16-bit PNM is big-endian: byte-swap on little-endian hosts */
if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
swab ((char*)ppm2, (char*)ppm2, width*colors*2);
fwrite (ppm, colors*output_bps/8, width, ofp);
}
free (ppm);
}
//@end COMMON
/* Command-line driver: parse the option list, then decode, process and
   write each raw file in turn.  Returns nonzero if any file failed.
   Fix: restores "ctime(&timestamp)", which had been garbled to
   "ctime(×tamp)" by HTML-entity damage ("&times" -> U+00D7). */
int CLASS main (int argc, const char **argv)
{
int arg, status=0, quality, i, c;
int timestamp_only=0, thumbnail_only=0, identify_only=0;
int user_qual=-1, user_black=-1, user_sat=-1, user_flip=-1;
int use_fuji_rotate=1, write_to_stdout=0, read_from_stdin=0;
const char *sp, *bpfile=0, *dark_frame=0, *write_ext;
char opm, opt, *ofname, *cp;
struct utimbuf ut;
#ifndef NO_LCMS
const char *cam_profile=0, *out_profile=0;
#endif
#ifndef LOCALTIME
putenv ((char *) "TZ=UTC");
#endif
#ifdef LOCALEDIR
setlocale (LC_CTYPE, "");
setlocale (LC_MESSAGES, "");
bindtextdomain ("dcraw", LOCALEDIR);
textdomain ("dcraw");
#endif
if (argc == 1) {
printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION);
printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n"));
printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]);
puts(_("-v Print verbose messages"));
puts(_("-c Write image data to standard output"));
puts(_("-e Extract embedded thumbnail image"));
puts(_("-i Identify files without decoding them"));
puts(_("-i -v Identify files and show metadata"));
puts(_("-z Change file dates to camera timestamp"));
puts(_("-w Use camera white balance, if possible"));
puts(_("-a Average the whole image for white balance"));
puts(_("-A <x y w h> Average a grey box for white balance"));
puts(_("-r <r g b g> Set custom white balance"));
puts(_("+M/-M Use/don't use an embedded color matrix"));
puts(_("-C <r b> Correct chromatic aberration"));
puts(_("-P <file> Fix the dead pixels listed in this file"));
puts(_("-K <file> Subtract dark frame (16-bit raw PGM)"));
puts(_("-k <num> Set the darkness level"));
puts(_("-S <num> Set the saturation level"));
puts(_("-n <num> Set threshold for wavelet denoising"));
puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)"));
puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)"));
puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)"));
#ifndef NO_LCMS
puts(_("-o <file> Apply output ICC profile from file"));
puts(_("-p <file> Apply camera ICC profile from file or \"embed\""));
#endif
puts(_("-d Document mode (no color, no interpolation)"));
puts(_("-D Document mode without scaling (totally raw)"));
puts(_("-j Don't stretch or rotate raw pixels"));
puts(_("-W Don't automatically brighten the image"));
puts(_("-b <num> Adjust brightness (default = 1.0)"));
puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)"));
puts(_("-q [0-3] Set the interpolation quality"));
puts(_("-h Half-size color image (twice as fast as \"-q 0\")"));
puts(_("-f Interpolate RGGB as four colors"));
puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G"));
puts(_("-s [0..N-1] Select one raw image or \"all\" from each file"));
puts(_("-6 Write 16-bit instead of 8-bit"));
puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\""));
puts(_("-T Write TIFF instead of PPM"));
puts("");
return 1;
}
argv[argc] = "";
/* accept "-x" and "+x" options: ((ch-2)|2) == '+' for both '-','+' */
for (arg=1; (((opm = argv[arg][0]) - 2) | 2) == '+'; ) {
opt = argv[arg++][1];
/* options in this list take numeric arguments; the parallel digit
   string says how many each one consumes */
if ((cp = (char *) strchr (sp="nbrkStqmHACg", opt)))
for (i=0; i < "114111111422"[cp-sp]-'0'; i++)
if (!isdigit(argv[arg+i][0])) {
fprintf (stderr,_("Non-numeric argument to \"-%c\"\n"), opt);
return 1;
}
switch (opt) {
case 'n': threshold = atof(argv[arg++]); break;
case 'b': bright = atof(argv[arg++]); break;
case 'r':
FORC4 user_mul[c] = atof(argv[arg++]); break;
case 'C': aber[0] = 1 / atof(argv[arg++]);
aber[2] = 1 / atof(argv[arg++]); break;
case 'g': gamm[0] = atof(argv[arg++]);
gamm[1] = atof(argv[arg++]);
if (gamm[0]) gamm[0] = 1/gamm[0]; break;
case 'k': user_black = atoi(argv[arg++]); break;
case 'S': user_sat = atoi(argv[arg++]); break;
case 't': user_flip = atoi(argv[arg++]); break;
case 'q': user_qual = atoi(argv[arg++]); break;
case 'm': med_passes = atoi(argv[arg++]); break;
case 'H': highlight = atoi(argv[arg++]); break;
case 's':
shot_select = abs(atoi(argv[arg]));
multi_out = !strcmp(argv[arg++],"all");
break;
case 'o':
if (isdigit(argv[arg][0]) && !argv[arg][1])
output_color = atoi(argv[arg++]);
#ifndef NO_LCMS
else out_profile = argv[arg++];
break;
case 'p': cam_profile = argv[arg++];
#endif
break;
case 'P': bpfile = argv[arg++]; break;
case 'K': dark_frame = argv[arg++]; break;
case 'z': timestamp_only = 1; break;
case 'e': thumbnail_only = 1; break;
case 'i': identify_only = 1; break;
case 'c': write_to_stdout = 1; break;
case 'v': verbose = 1; break;
case 'h': half_size = 1; break;
case 'f': four_color_rgb = 1; break;
/* -A deliberately falls through to -a (grey box implies auto WB) */
case 'A': FORC4 greybox[c] = atoi(argv[arg++]);
case 'a': use_auto_wb = 1; break;
case 'w': use_camera_wb = 1; break;
case 'M': use_camera_matrix = 3 * (opm == '+'); break;
case 'I': read_from_stdin = 1; break;
/* E/D/d fall through so document_mode ends up 3/2/1 */
case 'E': document_mode++;
case 'D': document_mode++;
case 'd': document_mode++;
case 'j': use_fuji_rotate = 0; break;
case 'W': no_auto_bright = 1; break;
case 'T': output_tiff = 1; break;
/* -4 implies -6 -W -g 1 1 via fallthrough */
case '4': gamm[0] = gamm[1] =
no_auto_bright = 1;
case '6': output_bps = 16; break;
default:
fprintf (stderr,_("Unknown option \"-%c\".\n"), opt);
return 1;
}
}
if (arg == argc) {
fprintf (stderr,_("No files to process.\n"));
return 1;
}
if (write_to_stdout) {
if (isatty(1)) {
fprintf (stderr,_("Will not write an image to the terminal!\n"));
return 1;
}
#if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__)
if (setmode(1,O_BINARY) < 0) {
perror ("setmode()");
return 1;
}
#endif
}
/* main per-file loop: decode, process, and write each argument */
for ( ; arg < argc; arg++) {
status = 1;
raw_image = 0;
image = 0;
oprof = 0;
meta_data = ofname = 0;
ofp = stdout;
/* merror() longjmps here on allocation failure */
if (setjmp (failure)) {
if (fileno(ifp) > 2) fclose(ifp);
if (fileno(ofp) > 2) fclose(ofp);
status = 1;
goto cleanup;
}
ifname = argv[arg];
if (!(ifp = fopen (ifname, "rb"))) {
perror (ifname);
continue;
}
status = (identify(),!is_raw);
if (user_flip >= 0)
flip = user_flip;
switch ((flip+3600) % 360) {
case 270: flip = 5; break;
case 180: flip = 3; break;
case 90: flip = 6;
}
if (timestamp_only) {
if ((status = !timestamp))
fprintf (stderr,_("%s has no timestamp.\n"), ifname);
else if (identify_only)
printf ("%10ld%10d %s\n", (long) timestamp, shot_order, ifname);
else {
if (verbose)
fprintf (stderr,_("%s time set to %d.\n"), ifname, (int) timestamp);
ut.actime = ut.modtime = timestamp;
utime (ifname, &ut);
}
goto next;
}
write_fun = &CLASS write_ppm_tiff;
if (thumbnail_only) {
if ((status = !thumb_offset)) {
fprintf (stderr,_("%s has no thumbnail.\n"), ifname);
goto next;
} else if (thumb_load_raw) {
load_raw = thumb_load_raw;
data_offset = thumb_offset;
height = thumb_height;
width = thumb_width;
filters = 0;
colors = 3;
} else {
fseek (ifp, thumb_offset, SEEK_SET);
write_fun = write_thumb;
goto thumbnail;
}
}
if (load_raw == &CLASS kodak_ycbcr_load_raw) {
height += height & 1;
width += width & 1;
}
if (identify_only && verbose && make[0]) {
printf (_("\nFilename: %s\n"), ifname);
printf (_("Timestamp: %s"), ctime(&timestamp)); /* fixed: was mojibake "×tamp" */
printf (_("Camera: %s %s\n"), make, model);
if (artist[0])
printf (_("Owner: %s\n"), artist);
if (dng_version) {
printf (_("DNG Version: "));
for (i=24; i >= 0; i -= 8)
printf ("%d%c", dng_version >> i & 255, i ? '.':'\n');
}
printf (_("ISO speed: %d\n"), (int) iso_speed);
printf (_("Shutter: "));
if (shutter > 0 && shutter < 1)
shutter = (printf ("1/"), 1 / shutter);
printf (_("%0.1f sec\n"), shutter);
printf (_("Aperture: f/%0.1f\n"), aperture);
printf (_("Focal length: %0.1f mm\n"), focal_len);
printf (_("Embedded ICC profile: %s\n"), profile_length ? _("yes"):_("no"));
printf (_("Number of raw images: %d\n"), is_raw);
if (pixel_aspect != 1)
printf (_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect);
if (thumb_offset)
printf (_("Thumb size: %4d x %d\n"), thumb_width, thumb_height);
printf (_("Full size: %4d x %d\n"), raw_width, raw_height);
} else if (!is_raw)
fprintf (stderr,_("Cannot decode file %s\n"), ifname);
if (!is_raw) goto next;
shrink = filters && (half_size || (!identify_only &&
(threshold || aber[0] != 1 || aber[2] != 1)));
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (identify_only) {
if (verbose) {
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (use_fuji_rotate) {
if (fuji_width) {
fuji_width = (fuji_width - 1 + shrink) >> shrink;
iwidth = fuji_width / sqrt(0.5);
iheight = (iheight - fuji_width) / sqrt(0.5);
} else {
if (pixel_aspect < 1) iheight = iheight / pixel_aspect + 0.5;
if (pixel_aspect > 1) iwidth = iwidth * pixel_aspect + 0.5;
}
}
if (flip & 4)
SWAP(iheight,iwidth);
printf (_("Image size: %4d x %d\n"), width, height);
printf (_("Output size: %4d x %d\n"), iwidth, iheight);
printf (_("Raw colors: %d"), colors);
if (filters) {
int fhigh = 2, fwide = 2;
if ((filters ^ (filters >> 8)) & 0xff) fhigh = 4;
if ((filters ^ (filters >> 16)) & 0xffff) fhigh = 8;
if (filters == 1) fhigh = fwide = 16;
if (filters == 9) fhigh = fwide = 6;
printf (_("\nFilter pattern: "));
for (i=0; i < fhigh; i++)
for (c = i && putchar('/') && 0; c < fwide; c++)
putchar (cdesc[fcol(i,c)]);
}
printf (_("\nDaylight multipliers:"));
FORCC printf (" %f", pre_mul[c]);
if (cam_mul[0] > 0) {
printf (_("\nCamera multipliers:"));
FORC4 printf (" %f", cam_mul[c]);
}
putchar ('\n');
} else
printf (_("%s is a %s %s image.\n"), ifname, make, model);
next:
fclose(ifp);
continue;
}
if (meta_length) {
meta_data = (char *) malloc (meta_length);
merror (meta_data, "main()");
}
if (filters || colors == 1) {
raw_image = (ushort *) calloc ((raw_height+7), raw_width*2);
merror (raw_image, "main()");
} else {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
}
if (verbose)
fprintf (stderr,_("Loading %s %s image from %s ...\n"),
make, model, ifname);
if (shot_select >= is_raw)
fprintf (stderr,_("%s: \"-s %d\" requests a nonexistent image!\n"),
ifname, shot_select);
fseeko (ifp, data_offset, SEEK_SET);
if (raw_image && read_from_stdin)
fread (raw_image, 2, raw_height*raw_width, stdin);
else (*load_raw)();
if (document_mode == 3) {
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (raw_image) {
image = (ushort (*)[4]) calloc (iheight, iwidth*sizeof *image);
merror (image, "main()");
crop_masked_pixels();
free (raw_image);
}
if (zero_is_bad) remove_zeroes();
bad_pixels (bpfile);
if (dark_frame) subtract (dark_frame);
quality = 2 + !fuji_width;
if (user_qual >= 0) quality = user_qual;
/* fold the smallest per-channel black level into the global black */
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
i = cblack[6];
FORC (cblack[4] * cblack[5])
if (i > cblack[6+c]) i = cblack[6+c];
FORC (cblack[4] * cblack[5])
cblack[6+c] -= i;
black += i;
if (user_black >= 0) black = user_black;
FORC4 cblack[c] += black;
if (user_sat > 0) maximum = user_sat;
#ifdef COLORCHECK
colorcheck();
#endif
if (is_foveon) {
if (document_mode || load_raw == &CLASS foveon_dp_load_raw) {
for (i=0; i < height*width*4; i++)
if ((short) image[0][i] < 0) image[0][i] = 0;
} else foveon_interpolate();
} else if (document_mode < 2)
scale_colors();
pre_interpolate();
/* demosaic: pick the interpolator by quality and filter pattern */
if (filters && !document_mode) {
if (quality == 0)
lin_interpolate();
else if (quality == 1 || colors > 3)
vng_interpolate();
else if (quality == 2 && filters > 1000)
ppg_interpolate();
else if (filters == 9)
xtrans_interpolate (quality*2-3);
else
ahd_interpolate();
}
if (mix_green)
for (colors=3, i=0; i < height*width; i++)
image[i][1] = (image[i][1] + image[i][3]) >> 1;
if (!is_foveon && colors == 3) median_filter();
if (!is_foveon && highlight == 2) blend_highlights();
if (!is_foveon && highlight > 2) recover_highlights();
if (use_fuji_rotate) fuji_rotate();
#ifndef NO_LCMS
if (cam_profile) apply_profile (cam_profile, out_profile);
#endif
convert_to_rgb();
if (use_fuji_rotate) stretch();
thumbnail:
/* choose the output extension to match the writer */
if (write_fun == &CLASS jpeg_thumb)
write_ext = ".jpg";
else if (output_tiff && write_fun == &CLASS write_ppm_tiff)
write_ext = ".tiff";
else
write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors*5-5;
ofname = (char *) malloc (strlen(ifname) + 64);
merror (ofname, "main()");
if (write_to_stdout)
strcpy (ofname,_("standard output"));
else {
strcpy (ofname, ifname);
if ((cp = strrchr (ofname, '.'))) *cp = 0;
if (multi_out)
sprintf (ofname+strlen(ofname), "_%0*d",
snprintf(0,0,"%d",is_raw-1), shot_select);
if (thumbnail_only)
strcat (ofname, ".thumb");
strcat (ofname, write_ext);
ofp = fopen (ofname, "wb");
if (!ofp) {
status = 1;
perror (ofname);
goto cleanup;
}
}
if (verbose)
fprintf (stderr,_("Writing data to %s ...\n"), ofname);
(*write_fun)();
fclose(ifp);
if (ofp != stdout) fclose(ofp);
cleanup:
if (meta_data) free (meta_data);
if (ofname) free (ofname);
if (oprof) free (oprof);
if (image) free (image);
/* with -s all, revisit the same file until every shot is written */
if (multi_out) {
if (++shot_select < is_raw) arg--;
else shot_select = 0;
}
}
return status;
}
#endif
|
control_tool.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
int main()
{
// A one-thread parallel region is enough to enter an OMPT task context;
// the calls below record frame addresses that the CHECK lines compare
// against the task frame reported by the control_tool callback.
#pragma omp parallel num_threads(1)
{
print_frame_from_outlined_fn(1);
print_frame(0);
// command=omp_control_tool_flush (3), modifier=1: fires the
// ompt_callback_control_tool event matched below.
omp_control_tool(omp_control_tool_flush, 1, NULL);
print_current_address(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_control_tool'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address({{.}})=[[EXIT_FRAME:0x[0-f]*]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER_FRAME:0x[0-f]*]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_control_tool: command=3, modifier=1, arg=[[NULL]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]*]], current_task_frame.exit=[[EXIT_FRAME]], current_task_frame.reenter={{0x[0-f]*}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
GB_binop__rminus_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint16)
// A*D function (colscale): GB (_AxD__rminus_uint16)
// D*A function (rowscale): GB (_DxB__rminus_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint16)
// C=scalar+B GB (_bind1st__rminus_uint16)
// C=scalar+B' GB (_bind1st_tran__rminus_uint16)
// C=A+scalar GB (_bind2nd__rminus_uint16)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used (RMINUS reads both operands)
// Fix: the macro body below ended in a stray line continuation that
// spliced the following comment line into the definition; drop it.
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT16 || GxB_NO_RMINUS_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop comes from the
// shared template, specialized here for z = (y - x) on uint16_t.
void GB (_Cdense_ewise3_accum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); the
// loop comes from the shared template, specialized for rminus/uint16.
void GB (_Cdense_ewise3_noaccum__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumb__rminus_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable in generated code: the block above already returned
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias so the template writes uint16_t results directly
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias so the template writes uint16_t results directly
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, specialized for rminus/uint16.
// For eWiseUnion, alpha/beta supply the values used where only one of
// A or B has an entry.
GrB_Info GB (_AaddB__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
// the scalars are only read (and only valid) for eWiseUnion
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__rminus_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for rminus (the flip was resolved when the op was
// generated), so only the non-flipped template branch is compiled here.
GrB_Info GB (_AemultB_02__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult, method 04): M is sparse/hyper, A and B bitmap/full.
// All of the work is done by the included template.
GrB_Info GB (_AemultB_04__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// compiled out for this operator/type combination
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held as a bitmap.
// All of the work is done by the included template.
GrB_Info GB (_AemultB_bitmap__rminus_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// compiled out for this operator/type combination
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply
// z = rminus(x,bij) = bij - x to every entry of B. Entries whose bitmap
// slot Bb is absent are skipped.
GrB_Info GB (_bind1st__rminus_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Bx = (uint16_t *) Bx_input ;
const uint16_t x = (*((const uint16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only process entries that are present in the bitmap
if (GBB (Bb, k))
{
const uint16_t bij = GBX (Bx, k, false) ;
Cx [k] = (bij - x) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply
// z = rminus(aij,y) = y - aij to every entry of A. Entries whose bitmap
// slot Ab is absent are skipped.
GrB_Info GB (_bind2nd__rminus_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
const uint16_t y = (*((const uint16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only process entries that are present in the bitmap
if (GBB (Ab, k))
{
const uint16_t aij = GBX (Ax, k, false) ;
Cx [k] = (y - aij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A while applying z = rminus(x,aij) = aij - x,
// with the scalar x bound as the first operand (see GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__rminus_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A while applying z = rminus(aij,y) = y - aij,
// with the scalar y bound as the second operand (see GB_CAST_OP above).
GrB_Info GB (_bind2nd_tran__rminus_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// compiled out for this operator/type combination
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint64
// op(A') function: GB_tran__minv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator entrywise. Per the macros
// above, each iteration computes Cx[p] = GB_IMINV_SIGNED((int8_t) Ax[p], 8).
GrB_Info GB_unop__minv_int8_uint64
(
int8_t *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// compiled out for this operator/type combination
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p])), via the GB_CAST_OP macro defined above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint64->int8, and apply minv.
// All of the work is done by the included transpose template (phase 2 only).
GrB_Info GB_tran__minv_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out for this operator/type combination
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
section1.c | #include <stdio.h>
#include <omp.h>
/* Work item for the first parallel section; always produces 2. */
int fun1()
{
    const int result = 2;
    return result;
}
/* Work item for the second parallel section; always produces 3. */
int fun2()
{
    const int result = 3;
    return result;
}
// Demo: run fun1 and fun2 concurrently via OpenMP sections, then print the
// combined result exactly once.
int main()
{
omp_set_num_threads(4); // request 4 threads for the parallel region below
int a, b; // Declared as shared variable because if declared private then
// suppose thread 0 assigned a to 1. No other thread will have access to this value
#pragma omp parallel
{
int tid = omp_get_thread_num();
// Each section runs on exactly one thread; together they fill in a and b.
#pragma omp sections
{
#pragma omp section
{
printf("%d | Executing 1\n", tid);
a = fun1();
}
#pragma omp section
{
printf("%d | Executing 2\n", tid);
b = fun2();
}
}
// Implicit barrier is present here to ensure that sections are executed first
// (an OpenMP barrier also implies a flush, so a and b are visible below).
#pragma omp single
printf("Answer %d", a + b);
}
} |
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is normalized in place as part of the computation (same contract
 * as the classic GNU libc example), so the caller's y is modified.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that 0 <= x->tv_usec - y->tv_usec <= 1000000. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int surplus = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * surplus;
        y->tv_sec -= surplus;
    }
    /* With y normalized, the component-wise difference is the answer;
     * tv_usec is certainly non-negative here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x's (possibly adjusted) seconds precede y's. */
    return x->tv_sec < y->tv_sec;
}
// Benchmark driver: allocates a double-buffered 3-D grid, runs the Pluto/CLooG
// time-tiled order-1 7-point stencil TESTS times, and reports per-run timing.
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// NOTE(review): Nx, Ny, Nz, Nt are left uninitialized when fewer than
// 4 command-line arguments are supplied, yet they are used unconditionally
// below -- confirm the harness always passes "Nx Ny Nz Nt".
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// A[2][Nz][Ny][Nx]: two time planes, ping-ponged via A[t % 2] below.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
// (tile_size is not read again in this function; presumably consumed by
// the source-to-source tool -- TODO confirm)
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
// Fixed seed so every run (and both TESTS iterations) sees the same data.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
// Time-tiled sweep generated by Pluto/CLooG: t5 iterates over time steps,
// t6/t7/t8 index z/y/x (shifted by t5 for the skewed tiling); the update
// writes plane (t5+1)%2 from plane t5%2, so planes alternate each step.
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
// parallelize across tiles in the t2 (z-tile) dimension
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(8*t3+Nx+4,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
interpolate_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
// Read the target output shape from a list of 1-element shape tensors.
// Each tensor must have shape [1]; GPU-resident tensors are copied to the
// host before their value is read.
inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  // get tensor from
  std::vector<int> vec_new_shape;
  vec_new_shape.reserve(list_new_shape_tensor.size());
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    // BUG FIX: the placeholder was misspelled "d%", so the offending dims
    // were never rendered into the error message; "%d" is the intended form.
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1],"
                          "but received %d.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      // Shape tensor lives on the GPU: stage it through host memory.
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}
// Copy all elements of a tensor into a std::vector<T>, staging through a
// host-side tensor first when the source resides on the GPU.
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  framework::Tensor cpu_starts_tensor;
  const T* src = new_data_tensor->data<T>();
  if (platform::is_gpu_place(new_data_tensor->place())) {
    // Device memory is not directly readable here; sync-copy to the CPU.
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    src = cpu_starts_tensor.data<T>();
  }
  return std::vector<T>(src, src + new_data_tensor->numel());
}
// Decompose a 3-D/4-D/5-D tensor shape into batch (N), channels (C) and the
// spatial extents (D, H, W), honouring NCHW vs NHWC channel placement.
// Spatial dimensions absent from the shape are reported as 1.
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  const bool channel_first = (data_layout == DataLayout::kNCHW);
  *N = dims[0];
  switch (dims.size()) {
    case 3:  // [N, C, W] or [N, W, C]
      *C = channel_first ? dims[1] : dims[2];
      *D = 1;
      *H = 1;
      *W = channel_first ? dims[2] : dims[1];
      break;
    case 4:  // [N, C, H, W] or [N, H, W, C]
      *C = channel_first ? dims[1] : dims[3];
      *D = 1;
      *H = channel_first ? dims[2] : dims[1];
      *W = channel_first ? dims[3] : dims[2];
      break;
    default:  // 5-D: [N, C, D, H, W] or [N, D, H, W, C]
      *C = channel_first ? dims[1] : dims[4];
      *D = channel_first ? dims[2] : dims[1];
      *H = channel_first ? dims[3] : dims[2];
      *W = channel_first ? dims[4] : dims[3];
      break;
  }
}
// Nearest-neighbour resize of a 4-D tensor: each output pixel (k,l) copies the
// source pixel at (ratio_h*k, ratio_w*l); align_corners adds +0.5 rounding.
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int n, const int c,
const int out_h, const int out_w,
const bool align_corners,
const DataLayout& data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) { // loop for images
// source row: round-to-nearest when align_corners, truncate otherwise
int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
: static_cast<int>(ratio_h * k);
for (int l = 0; l < out_w; l++) {
// source column, same rounding rule
int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
: static_cast<int>(ratio_w * l);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
if (data_layout == DataLayout::kNCHW) {
output_t(i, j, k, l) = input_t(i, j, in_k, in_l);
} else {
output_t(i, k, l, j) = input_t(i, in_k, in_l, j);
}
}
}
}
}
}
// 1-D linear resize of a 3-D tensor along its width axis. For each output
// column l, the west/east source columns and their lerp weights are
// precomputed, then blended per (batch, channel).
template <typename T>
static void LinearInterpolation(const Tensor& input, Tensor* output,
                                const float ratio_w, const int in_w,
                                const int n, const int c, const int out_w,
                                const bool align_corners, const bool align_mode,
                                const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 3>::From(input);
  auto output_t = EigenTensor<T, 3>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  // Per-output-column source indices and interpolation weights.
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  // BUG FIX: these vectors were only reserve()d and then written through
  // operator[], which is undefined behavior (size() stays 0 after reserve).
  // resize() actually creates the elements.
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;                       // west column, clamped
    int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w;  // east neighbour
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;  // w1lambda
    float d_e = 1.f - d_w;                                         // w2lambda
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(3)
#endif
  for (int i = 0; i < n; i++) {    // loop for batches
    for (int j = 0; j < c; j++) {  // loop for channels
      for (int l = 0; l < out_w; l++) {
        // linear interpolation between the west and east source columns
        T out_t;
        if (data_layout == DataLayout::kNCHW) {
          out_t = input_t(i, j, vx_w[l]) * vd_e[l] +
                  input_t(i, j, vx_e[l]) * vd_w[l];
          output_t(i, j, l) = out_t;
        } else {
          out_t = input_t(i, vx_w[l], j) * vd_e[l] +
                  input_t(i, vx_e[l], j) * vd_w[l];
          output_t(i, l, j) = out_t;
        }
      }
    }
  }
}
// Backward pass of 1-D linear resize: scatter each output-column gradient to
// its two source columns, weighted by the same lerp coefficients as forward.
// NOTE(review): align_mode is `int` here but `bool` in LinearInterpolation --
// values other than 0/1 behave the same, but the signatures disagree.
template <typename T>
static void LinearInterpolationGrad(const Tensor& output_grad,
Tensor* input_grad, const float ratio_w,
const int in_w, const int n, const int c,
const int out_w, const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
auto input_grad_t = EigenTensor<T, 3>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 3>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int l = 0; l < out_w; l++) {
// same source-index/weight computation as the forward pass
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0; // w
int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda
float d_e = 1.f - d_w; // w2lambda
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// linear interpolation grad
if (data_layout == DataLayout::kNCHW) {
const T grad = output_grad_t(i, j, l);
input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e);
input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w);
} else {
const T grad = output_grad_t(i, l, j);
input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e);
input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w);
}
}
}
}
}
// 2-D bilinear resize of a 4-D tensor. Row (north/south) and column
// (west/east) source indices and weights are precomputed per output row and
// column, then the four neighbours are blended per (batch, channel).
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
                                  const float ratio_h, const float ratio_w,
                                  const int in_h, const int in_w, const int n,
                                  const int c, const int out_h, const int out_w,
                                  const bool align_corners,
                                  const bool align_mode,
                                  const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  // Per-output-row source rows and vertical weights.
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  // BUG FIX: all six caches below were only reserve()d and then written via
  // operator[], which is undefined behavior (size() stays 0 after reserve).
  // resize() actually creates the elements.
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  // Per-output-column source columns and horizontal weights.
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    // align_flag is the same predicate the original inlined here.
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
  for (int i = 0; i < n; i++) {          // loop for batches
    for (int j = 0; j < c; j++) {        // loop for channels
      for (int k = 0; k < out_h; k++) {  // loop for images
        for (int l = 0; l < out_w; l++) {
          // bilinear interpolation: blend the four neighbouring pixels
          T out_t;
          if (data_layout == DataLayout::kNCHW) {
            out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
                    input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
                    input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
                    input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
            output_t(i, j, k, l) = out_t;
          } else {
            out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] +
                    input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] +
                    input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] +
                    input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l];
            output_t(i, k, l, j) = out_t;
          }
        }
      }
    }
  }
}
// 3-D trilinear resize of a 5-D tensor. Depth (front/back), row
// (north/south) and column (west/east) source indices and weights are
// precomputed per output slice/row/column, then the eight neighbours are
// blended per (batch, channel).
template <typename T>
static void TrilinearInterpolation(
    const Tensor& input, Tensor* output, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const bool align_mode,
    const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 5>::From(input);
  auto output_t = EigenTensor<T, 5>::From(*output);
  bool align_flag = (align_mode == 0 && !align_corners);
  // Per-output-depth source slices and depth weights.
  std::vector<int> vt_f, vt_b;
  std::vector<float> vd_f, vd_b;
  // BUG FIX: all twelve caches below were only reserve()d and then written
  // via operator[], which is undefined behavior (size() stays 0 after
  // reserve). resize() actually creates the elements.
  vt_f.resize(out_d);
  vt_b.resize(out_d);
  vd_f.resize(out_d);
  vd_b.resize(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int j = 0; j < out_d; j++) {
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    vt_f[j] = t_f;
    vt_b[j] = t_b;
    vd_f[j] = d_f;
    vd_b[j] = d_b;
  }
  // Per-output-row source rows and vertical weights.
  std::vector<int> vy_n, vy_s;
  std::vector<float> vd_n, vd_s;
  vy_n.resize(out_h);
  vy_s.resize(out_h);
  vd_n.resize(out_h);
  vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int k = 0; k < out_h; k++) {
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    vy_n[k] = y_n;
    vy_s[k] = y_s;
    vd_n[k] = d_n;
    vd_s[k] = d_s;
  }
  // Per-output-column source columns and horizontal weights.
  std::vector<int> vx_w, vx_e;
  std::vector<float> vd_w, vd_e;
  vx_w.resize(out_w);
  vx_e.resize(out_w);
  vd_w.resize(out_w);
  vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int l = 0; l < out_w; l++) {
    // align_flag is the same predicate the original inlined here.
    int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * l);
    x_w = (x_w > 0) ? x_w : 0;
    int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
    float idx_src_x = ratio_w * (l + 0.5) - 0.5;
    idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
    float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
    float d_e = 1.f - d_w;
    vx_w[l] = x_w;
    vx_e[l] = x_e;
    vd_w[l] = d_w;
    vd_e[l] = d_e;
  }
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
  for (int b = 0; b < n; b++) {          // loop for batches
    for (int i = 0; i < c; i++) {        // loop for channels
      for (int j = 0; j < out_d; j++) {  // loop for D, H, W
        for (int k = 0; k < out_h; k++) {
          for (int l = 0; l < out_w; l++) {
            // trilinear interpolation: blend the eight neighbouring voxels
            if (data_layout == DataLayout::kNCHW) {
              T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, i, j, k, l) = out_t;
            } else {
              T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] *
                            vd_n[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] *
                            vd_s[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] *
                            vd_s[k] * vd_w[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] *
                            vd_n[k] * vd_e[l] +
                        input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] *
                            vd_n[k] * vd_w[l];
              output_t(b, j, k, l, i) = out_t;
            }
          }
        }
      }
    }
  }
}
// Cubic convolution kernel for samples at distance |x| <= 1 from the target,
// with A the kernel's free parameter (set to -0.75 by the caller below).
template <typename T>
HOSTDEVICE inline T cubic_convolution1(T x, T A) {
return ((A + 2) * x - (A + 3)) * x * x + 1;
}
// Cubic convolution kernel for samples at distance 1 < |x| <= 2 from the
// target, with A the kernel's free parameter (set to -0.75 by the caller).
template <typename T>
HOSTDEVICE inline T cubic_convolution2(T x, T A) {
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
// Fill coeffs[4] with the bicubic weights for the four taps around a sample
// point, where t in [0,1) is the fractional offset from the second tap.
// Taps 0 and 3 are at distance 1+t and 2-t (outer kernel branch); taps 1 and
// 2 are at distance t and 1-t (inner branch).
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
T A = -0.75; // standard bicubic "a" parameter
T x1 = t;
coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
coeffs[1] = cubic_convolution1<T>(x1, A);
// opposite coefficients
T x2 = 1.0 - t;
coeffs[2] = cubic_convolution1<T>(x2, A);
coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}
// 1-D cubic interpolation of four consecutive samples x0..x3 at fractional
// position t (measured from x1), using the weights computed above.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
T coeffs[4];
get_cubic_upsample_coefficients<T>(coeffs, t);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
// 2-D bicubic resize of a 4-D tensor: for each output pixel, interpolate
// cubically along x over a 4x4 neighbourhood (with edge clamping), then
// cubically along y over the four intermediate results.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w,
const bool align_corners,
const DataLayout data_layout) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) { // loop for images
// fractional source row; align_corners maps corners exactly
T y_n = align_corners ? static_cast<T>(ratio_h * k)
: static_cast<T>(ratio_h * (k + 0.5) - 0.5);
int input_y = floorf(y_n);
const T y_t = y_n - input_y;
for (int l = 0; l < out_w; l++) {
// fractional source column, same convention as the row
T x_n = align_corners ? static_cast<T>(ratio_w * l)
: static_cast<T>(ratio_w * (l + 0.5) - 0.5);
int input_x = floorf(x_n);
const T x_t = x_n - input_x;
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
T coefficients[4];
// interp 4 times in x direction
for (int ii = 0; ii < 4; ii++) {
// clamp the 4x4 neighbourhood to the image borders
int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1),
static_cast<int>(0));
int access_x_0 =
std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0));
int access_x_1 =
std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0));
int access_x_2 =
std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0));
int access_x_3 =
std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0));
if (data_layout == DataLayout::kNCHW) {
coefficients[ii] =
cubic_interp<T>(input_t(i, j, access_y, access_x_0),
input_t(i, j, access_y, access_x_1),
input_t(i, j, access_y, access_x_2),
input_t(i, j, access_y, access_x_3), x_t);
} else {
coefficients[ii] =
cubic_interp<T>(input_t(i, access_y, access_x_0, j),
input_t(i, access_y, access_x_1, j),
input_t(i, access_y, access_x_2, j),
input_t(i, access_y, access_x_3, j), x_t);
}
}
// interp y direction
if (data_layout == DataLayout::kNCHW) {
output_t(i, j, k, l) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
} else {
output_t(i, k, l, j) =
cubic_interp<T>(coefficients[0], coefficients[1],
coefficients[2], coefficients[3], y_t);
}
}
}
}
}
}
// Backward pass of nearest-neighbor interpolation: every output-gradient
// element is accumulated into the single source pixel that produced it.
// With align_corners the source index is rounded (ratio * idx + 0.5),
// otherwise it is truncated.  input_grad must already be allocated (and
// normally zero-filled) at the input shape.
template <typename T>
static void NearestNeighborInterpolateGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int n, const int c, const int out_h,
    const int out_w, const bool align_corners, const DataLayout data_layout) {
  auto in_g = EigenTensor<T, 4>::From(*input_grad);
  auto out_g = EigenTensor<T, 4>::From(output_grad);
  const bool is_nchw = (data_layout == DataLayout::kNCHW);
  for (int oy = 0; oy < out_h; ++oy) {
    const int iy = align_corners ? static_cast<int>(ratio_h * oy + 0.5)
                                 : static_cast<int>(ratio_h * oy);
    for (int ox = 0; ox < out_w; ++ox) {
      const int ix = align_corners ? static_cast<int>(ratio_w * ox + 0.5)
                                   : static_cast<int>(ratio_w * ox);
      for (int b = 0; b < n; ++b) {      // batches
        for (int ch = 0; ch < c; ++ch) {  // channels
          if (is_nchw) {
            in_g(b, ch, iy, ix) += out_g(b, ch, oy, ox);
          } else {
            in_g(b, iy, ix, ch) += out_g(b, oy, ox, ch);
          }
        }
      }
    }
  }
}
// Backward pass of bilinear interpolation: each output-gradient element is
// scattered to the four input pixels that contributed to it in the forward
// pass, weighted by the same bilinear coefficients (d_n/d_s along y,
// d_w/d_e along x).  align_flag (align_mode == 0 && !align_corners) selects
// the half-pixel source-coordinate convention.  input_grad must already be
// allocated and zero-filled at the input shape.
template <typename T>
static void BilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
    const float ratio_w, const int in_h, const int in_w, const int n,
    const int c, const int out_h, const int out_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // y_n / y_s: the two source rows bracketing the sample (clamped);
    // d_n / d_s: their interpolation weights.
    int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * k);
    y_n = (y_n > 0) ? y_n : 0;
    int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
    float idx_src_y = ratio_h * (k + 0.5) - 0.5;
    idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
    float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
    float d_s = 1.f - d_n;
    for (int l = 0; l < out_w; l++) {
      // x_w / x_e: the two source columns (clamped); d_w / d_e: weights.
      int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                           : static_cast<int>(ratio_w * l);
      x_w = (x_w > 0) ? x_w : 0;
      int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
      float idx_src_x = ratio_w * (l + 0.5) - 0.5;
      idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
      float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
      float d_e = 1.f - d_w;
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bilinear interpolation grad
          if (data_layout == DataLayout::kNCHW) {
            const T grad = output_grad_t(i, j, k, l);
            input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
          } else {
            const T grad = output_grad_t(i, k, l, j);
            input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e);
            input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e);
            input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w);
            input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w);
          }
        }
      }
    }
  }
}
// Backward pass of trilinear interpolation on 5-D tensors: each
// output-gradient element is scattered to the eight surrounding input
// voxels, weighted by the product of the per-axis linear weights
// (d_f/d_b along depth, d_n/d_s along height, d_w/d_e along width).
// align_flag (align_mode == 0 && !align_corners) selects the half-pixel
// source-coordinate convention.  input_grad must already be allocated and
// zero-filled at the input shape.
template <typename T>
static void TrilinearInterpolationGrad(
    const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
    const float ratio_h, const float ratio_w, const int in_d, const int in_h,
    const int in_w, const int n, const int c, const int out_d, const int out_h,
    const int out_w, const bool align_corners, const int align_mode,
    const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
  bool align_flag = (align_mode == 0 && !align_corners);
  for (int j = 0; j < out_d; j++) {  // loop for D
    // t_f / t_b: the two depth slices bracketing the sample (clamped);
    // d_f / d_b: their weights.
    int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * j);
    t_f = (t_f > 0) ? t_f : 0;
    int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
    float idx_src_t = ratio_d * (j + 0.5) - 0.5;
    idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
    float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
    float d_b = 1.f - d_f;
    for (int k = 0; k < out_h; k++) {  // loop for H
      // y_n / y_s: the two rows (clamped); d_n / d_s: their weights.
      int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
                           : static_cast<int>(ratio_h * k);
      y_n = (y_n > 0) ? y_n : 0;
      int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
      float idx_src_y = ratio_h * (k + 0.5) - 0.5;
      idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
      float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
      float d_s = 1.f - d_n;
      for (int l = 0; l < out_w; l++) {  // loop for W
        // x_w / x_e: the two columns (clamped); d_w / d_e: their weights.
        int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
                             : static_cast<int>(ratio_w * l);
        x_w = (x_w > 0) ? x_w : 0;
        int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
        float idx_src_x = ratio_w * (l + 0.5) - 0.5;
        idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
        float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
        float d_e = 1.f - d_w;
        for (int b = 0; b < n; b++) {    // loop for batches
          for (int i = 0; i < c; i++) {  // loop for channels
            // trilinear interpolation grad
            if (data_layout == DataLayout::kNCHW) {
              const T grad = output_grad_t(b, i, j, k, l);
              input_grad_t(b, i, t_f, y_n, x_w) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, i, t_f, y_n, x_e) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, i, t_f, y_s, x_w) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, i, t_f, y_s, x_e) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, i, t_b, y_n, x_w) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, i, t_b, y_n, x_e) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, i, t_b, y_s, x_w) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, i, t_b, y_s, x_e) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            } else {
              const T grad = output_grad_t(b, j, k, l, i);
              input_grad_t(b, t_f, y_n, x_w, i) +=
                  static_cast<T>(grad * d_b * d_s * d_e);
              input_grad_t(b, t_f, y_n, x_e, i) +=
                  static_cast<T>(grad * d_b * d_s * d_w);
              input_grad_t(b, t_f, y_s, x_w, i) +=
                  static_cast<T>(grad * d_b * d_n * d_e);
              input_grad_t(b, t_f, y_s, x_e, i) +=
                  static_cast<T>(grad * d_b * d_n * d_w);
              input_grad_t(b, t_b, y_n, x_w, i) +=
                  static_cast<T>(grad * d_f * d_s * d_e);
              input_grad_t(b, t_b, y_n, x_e, i) +=
                  static_cast<T>(grad * d_f * d_s * d_w);
              input_grad_t(b, t_b, y_s, x_w, i) +=
                  static_cast<T>(grad * d_f * d_n * d_e);
              input_grad_t(b, t_b, y_s, x_e, i) +=
                  static_cast<T>(grad * d_f * d_n * d_w);
            }
          }
        }
      }
    }
  }
}
// Backward pass of bicubic interpolation: each output-gradient element is
// scattered to the 4x4 input neighborhood used in the forward pass, weighted
// by the product of the x and y cubic-convolution coefficients.  Neighborhood
// indices are clamped to the image, mirroring the forward clamping.
// input_grad must already be allocated and zero-filled at the input shape.
template <typename T>
static void BicubicInterpolationGrad(const Tensor& output_grad,
                                     Tensor* input_grad, const float ratio_h,
                                     const float ratio_w, const int in_h,
                                     const int in_w, const int n, const int c,
                                     const int out_h, const int out_w,
                                     const bool align_corners,
                                     const DataLayout data_layout) {
  auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
  auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
  for (int k = 0; k < out_h; k++) {  // loop for images
    // Source y coordinate; input_y is its integer cell, y_t the fraction.
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    T y_t = y_n - input_y;
    for (int l = 0; l < out_w; l++) {
      // Source x coordinate; input_x is its integer cell, x_t the fraction.
      T x_n = align_corners ? static_cast<T>(ratio_w * l)
                            : static_cast<T>(ratio_w * (l + 0.5) - 0.5);
      int input_x = floorf(x_n);
      T x_t = x_n - input_x;
      // Same cubic weights as the forward pass, along each axis.
      T x_coeffs[4];
      T y_coeffs[4];
      get_cubic_upsample_coefficients<T>(x_coeffs, x_t);
      get_cubic_upsample_coefficients<T>(y_coeffs, y_t);
      for (int i = 0; i < n; i++) {    // loop for batches
        for (int j = 0; j < c; j++) {  // loop for channels
          // bicubic interpolation grad
          for (int ii = 0; ii < 4; ii++) {
            for (int jj = 0; jj < 4; jj++) {
              int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1),
                                      static_cast<int>(0));
              int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1),
                                      static_cast<int>(0));
              if (data_layout == DataLayout::kNCHW) {
                T grad = output_grad_t(i, j, k, l);
                input_grad_t(i, j, access_y, access_x) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              } else {
                T grad = output_grad_t(i, k, l, j);
                input_grad_t(i, access_y, access_x, j) +=
                    grad * y_coeffs[jj] * x_coeffs[ii];
              }
            }
          }
        }
      }
    }
  }
}
// Forward 1-D interpolation driver.  Resolves the output width from, in
// decreasing priority, SizeTensor, OutSize, the Scale input / "scale"
// attribute, and finally the "out_w" attribute; allocates the output; and
// dispatches to LinearInterpolation.  Equal sizes degenerate to a copy.
template <typename T>
static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout layout = framework::StringToDataLayout(layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), layout, &n, &c, &in_d, &in_h, &in_w);

  const auto method = ctx.Attr<std::string>("interp_method");
  const bool align_corners = ctx.Attr<bool>("align_corners");
  const int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");

  auto size_tensors = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (!size_tensors.empty()) {
    // Explicit per-dimension size tensors override everything else.
    auto sizes = get_new_shape(size_tensors);
    out_w = sizes[0];
  } else {
    auto* scale_tensor = ctx.Input<Tensor>("Scale");
    const float scale = (scale_tensor != nullptr)
                            ? get_new_data_from_tensor<float>(scale_tensor)[0]
                            : ctx.Attr<float>("scale");
    if (scale > 0) {
      out_w = static_cast<int>(in_w * scale);
    }
    auto* out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      // OutSize tensor overrides the scale-derived size.
      out_w = get_new_data_from_tensor<int>(out_size)[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0,
                    platform::errors::InvalidArgument(
                        "out_w in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));

  framework::DDim dim_out;
  if (layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  if (in_w == out_w) {
    // Identity resize: no interpolation needed.
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }
  if (method == "linear") {
    LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w,
                           align_corners, align_mode, layout);
  }
}
// Forward 2-D interpolation driver.  Resolves out_h/out_w from, in
// decreasing priority, SizeTensor, OutSize, the Scale input / "scale"
// attribute, and the out_h/out_w attributes; allocates the output; and
// dispatches on "interp_method" (bilinear / nearest / bicubic).
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout layout = framework::StringToDataLayout(layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), layout, &n, &c, &in_d, &in_h, &in_w);

  const auto method = ctx.Attr<std::string>("interp_method");
  const bool align_corners = ctx.Attr<bool>("align_corners");
  const int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");

  auto size_tensors = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (!size_tensors.empty()) {
    // Explicit per-dimension size tensors override everything else.
    auto sizes = get_new_shape(size_tensors);
    out_h = sizes[0];
    out_w = sizes[1];
  } else {
    auto* scale_tensor = ctx.Input<Tensor>("Scale");
    const float scale = (scale_tensor != nullptr)
                            ? get_new_data_from_tensor<float>(scale_tensor)[0]
                            : ctx.Attr<float>("scale");
    if (scale > 0) {
      out_h = static_cast<int>(in_h * scale);
      out_w = static_cast<int>(in_w * scale);
    }
    auto* out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      // OutSize tensor overrides the scale-derived sizes.
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_h = out_size_data[0];
      out_w = out_size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0,
                    platform::errors::InvalidArgument(
                        "out_h in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0,
                    platform::errors::InvalidArgument(
                        "out_w in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));

  framework::DDim dim_out;
  if (layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  if (in_h == out_h && in_w == out_w) {
    // Identity resize: no interpolation needed.
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    ratio_h = align_corners ? static_cast<float>(in_h - 1) / (out_h - 1)
                            : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }

  if (method == "bilinear") {
    BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                             out_h, out_w, align_corners, align_mode, layout);
  } else if (method == "nearest") {
    NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
                                  out_w, align_corners, layout);
  } else if (method == "bicubic") {
    BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
                            out_h, out_w, align_corners, layout);
  }
}
// Forward 3-D interpolation driver.  Resolves out_d/out_h/out_w from, in
// decreasing priority, SizeTensor, OutSize, the Scale input / "scale"
// attribute, and the out_* attributes; allocates the output; and dispatches
// to TrilinearInterpolation when "interp_method" is "trilinear".
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
                                const Tensor& input, Tensor* output) {
  const std::string layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout layout = framework::StringToDataLayout(layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), layout, &n, &c, &in_d, &in_h, &in_w);

  const auto method = ctx.Attr<std::string>("interp_method");
  const bool align_corners = ctx.Attr<bool>("align_corners");
  const int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");

  auto size_tensors = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (!size_tensors.empty()) {
    // Explicit per-dimension size tensors override everything else.
    auto sizes = get_new_shape(size_tensors);
    out_d = sizes[0];
    out_h = sizes[1];
    out_w = sizes[2];
  } else {
    auto* scale_tensor = ctx.Input<Tensor>("Scale");
    const float scale = (scale_tensor != nullptr)
                            ? get_new_data_from_tensor<float>(scale_tensor)[0]
                            : ctx.Attr<float>("scale");
    if (scale > 0) {
      out_d = static_cast<int>(in_d * scale);
      out_h = static_cast<int>(in_h * scale);
      out_w = static_cast<int>(in_w * scale);
    }
    auto* out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      // OutSize tensor overrides the scale-derived sizes.
      auto out_size_data = get_new_data_from_tensor<int>(out_size);
      out_d = out_size_data[0];
      out_h = out_size_data[1];
      out_w = out_size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0,
                    platform::errors::InvalidArgument(
                        "out_d in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0,
                    platform::errors::InvalidArgument(
                        "out_h in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0,
                    platform::errors::InvalidArgument(
                        "out_w in Attr(out_shape) of Op(interpolate) "
                        "should be greater than 0."));

  framework::DDim dim_out;
  if (layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->mutable_data<T>(dim_out, ctx.GetPlace());

  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    // Identity resize: no interpolation needed.
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    ratio_d = align_corners ? static_cast<float>(in_d - 1) / (out_d - 1)
                            : static_cast<float>(in_d) / out_d;
  }
  if (out_h > 1) {
    ratio_h = align_corners ? static_cast<float>(in_h - 1) / (out_h - 1)
                            : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }

  if (method == "trilinear") {
    TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
                              in_h, in_w, n, c, out_d, out_h, out_w,
                              align_corners, align_mode, layout);
  }
}
// Backward 1-D interpolation driver.  Recomputes the forward output width
// (SizeTensor overrides OutSize, which overrides the Scale input / "scale"
// attribute), zero-fills the input gradient at the input's shape, and
// dispatches to LinearInterpolationGrad.  Equal sizes degenerate to a copy.
template <typename T>
static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout layout = framework::StringToDataLayout(layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), layout, &n, &c, &in_d, &in_h, &in_w);

  const auto method = ctx.Attr<std::string>("interp_method");
  const bool align_corners = ctx.Attr<bool>("align_corners");
  const int align_mode = ctx.Attr<int>("align_mode");
  int out_w = ctx.Attr<int>("out_w");

  auto* scale_tensor = ctx.Input<Tensor>("Scale");
  const float scale = (scale_tensor != nullptr)
                          ? get_new_data_from_tensor<float>(scale_tensor)[0]
                          : ctx.Attr<float>("scale");
  if (scale > 0) {
    out_w = static_cast<int>(in_w * scale);
  }
  auto* out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    out_w = get_new_data_from_tensor<int>(out_size)[0];
  }
  auto size_tensors = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (!size_tensors.empty()) {
    // Explicit per-dimension size tensors win over everything else.
    out_w = get_new_shape(size_tensors)[0];
  }

  framework::DDim dim_grad;
  if (layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());

  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_w == out_w) {
    // Identity resize: gradient passes straight through.
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }
  if (method == "linear") {
    LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c,
                               out_w, align_corners, align_mode, layout);
  }
}
// Backward 2-D interpolation driver.  Recomputes the forward output size
// (SizeTensor overrides OutSize, which overrides the Scale input / "scale"
// attribute), zero-fills the input gradient at the input's shape, and
// dispatches on "interp_method" (bilinear / nearest / bicubic).
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout layout = framework::StringToDataLayout(layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), layout, &n, &c, &in_d, &in_h, &in_w);

  const auto method = ctx.Attr<std::string>("interp_method");
  const bool align_corners = ctx.Attr<bool>("align_corners");
  const int align_mode = ctx.Attr<int>("align_mode");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");

  auto* scale_tensor = ctx.Input<Tensor>("Scale");
  const float scale = (scale_tensor != nullptr)
                          ? get_new_data_from_tensor<float>(scale_tensor)[0]
                          : ctx.Attr<float>("scale");
  if (scale > 0) {
    out_h = static_cast<int>(in_h * scale);
    out_w = static_cast<int>(in_w * scale);
  }
  auto* out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_h = out_size_data[0];
    out_w = out_size_data[1];
  }
  auto size_tensors = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (!size_tensors.empty()) {
    // Explicit per-dimension size tensors win over everything else.
    auto sizes = get_new_shape(size_tensors);
    out_h = sizes[0];
    out_w = sizes[1];
  }

  framework::DDim dim_grad;
  if (layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_h, in_w};
  } else {
    dim_grad = {n, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());

  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_h == out_h && in_w == out_w) {
    // Identity resize: gradient passes straight through.
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    ratio_h = align_corners ? static_cast<float>(in_h - 1) / (out_h - 1)
                            : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }

  if (method == "bilinear") {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w, align_corners,
                                 align_mode, layout);
  } else if (method == "nearest") {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                      n, c, out_h, out_w, align_corners,
                                      layout);
  } else if (method == "bicubic") {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h,
                                in_w, n, c, out_h, out_w, align_corners,
                                layout);
  }
}
// Backward 3-D interpolation driver.  Recomputes the forward output size
// (SizeTensor overrides OutSize, which overrides the Scale input / "scale"
// attribute), zero-fills the input gradient at the input's shape, and
// dispatches to TrilinearInterpolationGrad.  Equal sizes degenerate to a
// pass-through copy of the gradient.
//
// Fix: output_grad is now taken by const reference.  It was accidentally
// declared `const Tensor output_grad` (by value), unlike the 1-D/2-D
// counterparts, so every call copied the Tensor object.  Call sites are
// unaffected.
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad,
                                const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");
  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");

  float scale;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale = scale_data[0];
  } else {
    scale = ctx.Attr<float>("scale");
  }
  if (scale > 0) {
    out_d = static_cast<int>(in_d * scale);
    out_h = static_cast<int>(in_h * scale);
    out_w = static_cast<int>(in_w * scale);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    // OutSize tensor overrides the scale-derived sizes.
    auto out_size_data = get_new_data_from_tensor<int>(out_size);
    out_d = out_size_data[0];
    out_h = out_size_data[1];
    out_w = out_size_data[2];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // Explicit per-dimension size tensors win over everything else.
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  }

  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_d, in_h, in_w};
  } else {
    dim_grad = {n, in_d, in_h, in_w, c};
  }
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());

  // Gradients are accumulated with +=, so start from zero.
  auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
  math::SetConstant<platform::CPUDeviceContext, T> zero;
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    // Identity resize: gradient passes straight through.
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(in_d) / out_d;
  }
  if (out_h > 1) {
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(in_h) / out_h;
  }
  if (out_w > 1) {
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolationGrad<T>(
        output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n,
        c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
  }
}
// Forward interpolate kernel: dispatches to the 1-D/2-D/3-D CPU
// implementation based on the rank of the input tensor.
template <typename T>
class InterpolateKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    switch (input->dims().size()) {
      case 3:  // 1D interpolation
        Interpolate1DCPUFwd<T>(ctx, *input, output);
        break;
      case 4:  // 2D interpolation
        Interpolate2DCPUFwd<T>(ctx, *input, output);
        break;
      case 5:  // 3D interpolation
        Interpolate3DCPUFwd<T>(ctx, *input, output);
        break;
      default:
        break;  // other ranks: no-op, as before
    }
  }
};
// Backward interpolate kernel: dispatches to the 1-D/2-D/3-D CPU gradient
// implementation based on the rank of the output gradient.
template <typename T>
class InterpolateGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    switch (output_grad->dims().size()) {
      case 3:  // 1D interpolation grad
        Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 4:  // 2D interpolation grad
        Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      case 5:  // 3D interpolation grad
        Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
        break;
      default:
        break;  // other ranks: no-op, as before
    }
  }
};
} // namespace operators
} // namespace paddle
|
Argument.h | /*************************************************************************
> File Name: Argument.h
> Author: ShiShupeng
> Mail: shishupeng@mail.nsccwx.cn
	> Created Time: Wednesday, 2018-08-01 14:33:00
	> Program Information:
>
************************************************************************/
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <mpi.h>
#include <omp.h>
/* Evaluate a netCDF call and report (but do not abort on) any error.
 * Wrapped in do { ... } while (0) so the macro expands to exactly one
 * statement and is safe inside an unbraced if/else; the status variable
 * uses a reserved-style name to avoid shadowing a caller's `status`. */
#define XXnc(cmd)                            \
    do {                                     \
        int xxnc_status_ = (cmd);            \
        if (xxnc_status_)                    \
        {                                    \
            fprintf(stderr,                  \
                    "%s:%i: error: %s\n",    \
                    __FILE__,                \
                    __LINE__,                \
                    nc_strerror(xxnc_status_)); \
        }                                    \
    } while (0)
/* Metadata snapshot of one netCDF file (or of the combined output file
 * assembled from decomposed pieces).  The MAX_NC_* capacities come from
 * <netcdf.h>.  "Decomposed" dimensions are those carrying a
 * "domain_decomposition" attribute; for them dimsize is the piece size and
 * dimfullsize the size of the whole domain. */
typedef struct fileinfo
{
 int filesnum;                          /* Number of files in the set ("NumFilesInSet") */
 int ncfid;  /* ID of the input netCDF file */
 int ndims;  /* Number of dimensions */
 int nvars;  /* Number of variables */
 int ngatts;  /* Number of global attributes */
 int recdim;  /* ID of the record dimensions */
 char varname[MAX_NC_VARS][MAX_NC_NAME];  /* Names of the variables */
 nc_type datatype[MAX_NC_VARS];  /* Data types of the variables */
 int varndims[MAX_NC_VARS];  /* Number of dimensions for each variable */
 int vardim[MAX_NC_VARS][MAX_NC_DIMS];  /* Dimensions for each variable */
 int natts[MAX_NC_VARS];  /* Number of attributes for each variable */
 int varid[MAX_NC_VARS];                /* Variable IDs (in the output file) */
 unsigned char vardecomp[MAX_NC_VARS];  /* Is the variable decomposed */
 char dimname[MAX_NC_DIMS][MAX_NC_NAME];  /* Names of the dimensions */
 long dimsize[MAX_NC_DIMS];  /* Sizes of the dimensions (decomposed) */
 long dimfullsize[MAX_NC_DIMS];  /* Full sizes of the dimensions */
 long dimstart[MAX_NC_DIMS];  /* Start positions within full dimensions */
 long dimend[MAX_NC_DIMS];  /* End positions within full dimensions (-1 = not decomposed) */
 unsigned char varmiss[MAX_NC_VARS];  /* Does variable have missing_value */
 unsigned char varmissval[MAX_NC_VARS][8];  /* missing_value per variable */
} FileInfo;
/* One netCDF attribute: name, element type, element count, and the value
 * decoded into whichever typed buffer matches `type`.
 * NOTE(review): the numeric buffers hold at most 4 elements and `data`
 * at most 512 chars -- presumably sufficient for this data set; confirm
 * against the files actually read. */
typedef struct AttInfo {
    char name[NC_MAX_NAME + 1];
    nc_type type;      /* NC_CHAR / NC_INT / NC_FLOAT / NC_DOUBLE ... */
    int len;           /* number of elements in the attribute value */
    char data[512];    /* value when type == NC_CHAR */
    int datai[4];      /* value when type == NC_INT */
    double datad[4];   /* value when type == NC_DOUBLE */
    float dataf[4];    /* value when type == NC_FLOAT */
} Att;
/* Metadata for one netCDF variable of up to 4 dimensions, including up to
 * 4 of its attributes.  start/end presumably hold the variable's index
 * range within the full (un-decomposed) domain -- TODO confirm against the
 * code that fills them (not visible in this chunk). */
typedef struct VarInfo {
    int id;                      /* netCDF variable id */
    char name[NC_MAX_NAME + 1];
    nc_type type;
    int ndims;                   /* number of dimensions actually used */
    int dimid[4];
    int dimfullsize[4];
    int start[4];
    int end[4];
    int attnum;                  /* number of entries filled in att[] */
    Att att[4];
} Var;
/* Descriptor of a data buffer for one variable: identity, element type,
 * total element count, and the X/Y/Z start/end index range the buffer
 * covers -- presumably the subdomain of one decomposed piece; TODO confirm
 * against the code that fills it (not visible in this chunk). */
typedef struct BufInfo {
    int id;                      /* netCDF variable id */
    char name[NC_MAX_NAME + 1];
    nc_type type;
    int ndims;
    int dimid[4];
    size_t varsize;              /* number of elements in the buffer */
    int Xst;
    int Xed;
    int Yst;
    int Yed;
    int Zst;
    int Zed;
} Buf;
/* File-scope scratch buffers, one pointer per supported netCDF element
 * type (double/float/int/char/short).  Presumably the alldata* buffers
 * hold an assembled full field while data* hold one piece -- TODO confirm
 * against the readers/writers (not visible in this chunk).
 * NOTE(review): these are definitions in a header file; including it from
 * more than one translation unit would produce duplicate symbols. */
double *alldatad = NULL;
float *alldataf = NULL;
int *alldatai = NULL;
char *alldatac = NULL;
short *alldatas = NULL;
double *datad = NULL;
float *dataf = NULL;
int *datai = NULL;
char *datac = NULL;
short *datas = NULL;
/* Define the structure of the combined output file by mirroring piece #0
 * ("<outncname>.0000") of a decomposed netCDF data set.
 *
 * ncoutfile->ncfid must already refer to an open netCDF file in define
 * mode.  The function copies dimensions (restored to their full,
 * un-decomposed sizes via each coordinate variable's
 * "domain_decomposition" attribute), variable definitions and their
 * attributes (minus "domain_decomposition"), and the global attributes
 * (minus "NumFilesInSet"; "filename" is rewritten to outncname), then
 * calls nc_enddef on the output file.
 *
 * Fixes: every metadata-read failure now returns immediately instead of
 * continuing with a closed file / freed ncinfile (use-after-free); the
 * ncopen result is checked; and the input piece is closed and the scratch
 * FileInfo freed on the success path (both were leaked). */
void InitOutfile(FileInfo *ncoutfile, char *outncname)
{
  FileInfo *ncinfile;
  int nfiles2;
  int d, v, n;
  int dimid;
  int decomp[4];   /* domain_decomposition: full start/end, piece start/end */
  char attname[MAX_NC_NAME];
  char filename[256];

  ncinfile = (FileInfo*) malloc (sizeof(FileInfo));
  sprintf(filename, "%s.%04d", outncname, 0);
  ncinfile->ncfid = ncopen(filename, NC_NOWRITE);
  if (ncinfile->ncfid == (-1)) {
    fprintf(stderr,"Error: cannot open file \"%s\"!\n",filename);
    free(ncinfile);
    return;
  }
  ncattget(ncinfile->ncfid, NC_GLOBAL, "NumFilesInSet", (void*) &nfiles2);
  ncoutfile->filesnum = nfiles2;

  /* Get some general information about the input netCDF file */
  if (ncinquire(ncinfile->ncfid,&(ncinfile->ndims),&(ncinfile->nvars),
                &(ncinfile->ngatts),&(ncinfile->recdim))==(-1)) {
    fprintf(stderr,"Error: cannot read the file's metadata!\n");
    ncclose(ncinfile->ncfid); free(ncinfile);
    return;  /* was missing: kept running with freed memory */
  }
  ncoutfile->ndims = ncinfile->ndims;
  ncoutfile->ngatts = ncinfile->ngatts;

  /* Get some information about the dimensions.  Every dimension starts
   * out marked "not decomposed" (dimend == -1). */
  for (d=0; d < ncinfile->ndims; d++) {
    if ((ncdiminq(ncinfile->ncfid,d,ncinfile->dimname[d],
                  &(ncinfile->dimsize[d])))==(-1)) {
      fprintf(stderr,"Error: cannot read dimension #%d's metadata!\n",d);
      ncclose(ncinfile->ncfid); free(ncinfile);
      return;  /* was missing */
    }
    ncinfile->dimfullsize[d]=ncinfile->dimsize[d];
    ncinfile->dimstart[d]=1;
    ncinfile->dimend[d]=(-1);
  }

  /* Save some information for the output file */
  ncoutfile->nvars=ncinfile->nvars;
  ncoutfile->recdim=ncinfile->recdim;

  /* Get some information about the variables */
  for (v=0; v < ncinfile->nvars; v++) {
    if ((ncvarinq(ncinfile->ncfid,v,ncinfile->varname[v],
                  &(ncinfile->datatype[v]),&(ncinfile->varndims[v]),
                  ncinfile->vardim[v],&(ncinfile->natts[v])))==(-1)) {
      fprintf(stderr,"Error: cannot read variable #%d's metadata!\n",v);
      ncclose(ncinfile->ncfid);
      free(ncinfile);
      return;  /* was missing */
    }
    ncoutfile->datatype[v] = ncinfile->datatype[v];
    memcpy(ncoutfile->vardim[v], ncinfile->vardim[v], sizeof(int) * 4);
    ncoutfile->natts[v] = ncinfile->natts[v];

    dimid = -1;
    nc_inq_dimid(ncinfile->ncfid,ncinfile->varname[v], &dimid);
    /* If the variable is also a dimension then get decomposition info */
    if ((dimid)!=(-1)) {
      int jj = nc_get_att_int(ncinfile->ncfid,v,"domain_decomposition", (void *)decomp);
      if (jj == 0) {
        /* the dimension is decomposed */
        ncinfile->dimfullsize[dimid]=decomp[1]-decomp[0]+1;
        ncinfile->dimstart[dimid]=decomp[2]-(decomp[0]-1);
        ncinfile->dimend[dimid]=decomp[3]-(decomp[0]-1);
      }
      else {
        /* the dimension is NOT decomposed */
        ncinfile->dimfullsize[dimid]=ncinfile->dimsize[dimid];
        ncinfile->dimstart[dimid]=1; ncinfile->dimend[dimid]=(-1);
      }
    }
  }

  /* Get some additional information about the variables */
  for (v=0; v < ncinfile->nvars; v++) {
    /* start by assuming the variable has no decomposed dimension */
    ncinfile->vardecomp[v]=0;
    /* now, iterate over the variable's dimensions and mark the */
    /* variable as a decomposed variable if any dimension of */
    /* the variable is decomposed */
    for (d=0; d < ncinfile->varndims[v]; d++) {
      /* Does the variable have a decomposed dimension? */
      if (ncinfile->dimend[ncinfile->vardim[v][d]]!=(-1)) {
        ncinfile->vardecomp[v]=1;
        break;
      }
    }
    /* Save some information for the output file */
    /* This only needs to be done once per output file */
    ncoutfile->varndims[v]=ncinfile->varndims[v];
    for (d=0; d < ncinfile->varndims[v]; d++) {
      ncoutfile->vardim[v][d]=ncinfile->vardim[v][d];
    }
    ncoutfile->vardecomp[v]=ncinfile->vardecomp[v];
    strcpy(ncoutfile->varname[v],ncinfile->varname[v]);
    ncoutfile->varmiss[v]=0;
  }
  for (d=0; d < ncinfile->ndims; d++) {
    ncoutfile->dimfullsize[d]=ncinfile->dimfullsize[d];
  }

  /* If the output netCDF file was just created then define its structure */
  /* Define the dimensions */
  for (d=0; d < ncinfile->ndims; d++) {
    ncdimdef(ncoutfile->ncfid,ncinfile->dimname[d], ncinfile->dimfullsize[d]);
  }

  /* Define the variables and copy their attributes */
  for (v=0; v < ncinfile->nvars; v++) {
    ncoutfile->varid[v] = ncvardef(ncoutfile->ncfid,ncinfile->varname[v],ncinfile->datatype[v], ncinfile->varndims[v],ncinfile->vardim[v]);
    for (n=0; n < ncinfile->natts[v]; n++) {
      ncattname(ncinfile->ncfid,v,n,attname);
      if (!strcmp(attname,"domain_decomposition")) continue;
      else {
        if (ncattcopy(ncinfile->ncfid,v,attname,ncoutfile->ncfid,v)==(-1)) {
          fprintf(stderr,"Error: cannot copy variable \"%s\"'s attributes!\n",
                  ncinfile->varname[v]);
          ncclose(ncinfile->ncfid);
          free(ncinfile);
          return;  /* was missing: kept using freed ncinfile */
        }
      }
    }
  }

  /* Copy the global attributes */
  for (n=0; n < ncinfile->ngatts; n++) {
    ncattname(ncinfile->ncfid,NC_GLOBAL,n,attname);
    if (!strcmp(attname,"NumFilesInSet")) {
      continue;
    }
    else if (!strcmp(attname,"filename")) {
      ncattput(ncoutfile->ncfid,NC_GLOBAL,attname,NC_CHAR,
               strlen(outncname),(void *)outncname);
    }
    else {
      /* Best-effort: report a failed copy but continue with the rest. */
      if (ncattcopy(ncinfile->ncfid,NC_GLOBAL,attname,ncoutfile->ncfid, NC_GLOBAL)==(-1)) {
        fprintf(stderr,"Error: cannot copy the file's global attributes!\n");
      }
    }
  }
  nc_enddef(ncoutfile->ncfid);

  /* Done with the input piece: close it and release the scratch metadata
   * (both were leaked on the success path). */
  ncclose(ncinfile->ncfid);
  free(ncinfile);
}
/* Count the variables in piece #0 ("<name>.0000") of a decomposed data
 * set by probing successive variable ids until nc_inq_varname fails.
 * Returns the number of variables.
 * Fixes: the opened file is now closed (ncid was leaked on every call),
 * and the unused locals (varid, attnum, natts) are gone. */
int GetNvars(char *name)
{
    char filename[256];
    int ncid;
    int nvars;

    sprintf(filename, "%s.%04d", name, 0);
    XXnc(nc_open(filename, 0, &ncid));
    for (nvars = 0;; nvars++)
        if (nc_inq_varname(ncid, nvars, NULL))
            break;
    XXnc(nc_close(ncid));
    return nvars;
}
/* Read per-variable metadata/attributes and the file's global attributes
 * from the first member file of the set ("<name>.0000").
 *
 * For every variable, fills ncvar[varid] with id, type, rank, name,
 * dimension ids and the attribute list; only NC_CHAR attribute values are
 * retained in att[].data -- numeric variable attributes are read and then
 * discarded (presumably intentional; TODO confirm).  Global attributes of
 * type NC_CHAR/NC_DOUBLE/NC_INT/NC_FLOAT are stored into gatt[] and their
 * count is returned through *gattnum.
 *
 * NOTE(review): the global-attribute loop is nested inside the per-variable
 * loop, so the same global attributes are re-read nvars times.  The result
 * is correct but the loop could be hoisted out of the varid loop.
 */
void GetHeadinfo(Var *ncvar, char *name, int nvars, Att *gatt, int *gattnum)
{
  char filename[256];
  sprintf(filename, "%s.%04d", name, 0);
  int ncid;
  int varid, attnum, natts;
  XXnc(nc_open(filename, 0, &ncid));
  for(varid = 0; varid < nvars; varid++) {
    char namevar[NC_MAX_NAME + 1];
    nc_type xxtype;
    int ndims, natts, attnum, dimid[4];  /* shadow the outer declarations */
    XXnc(nc_inq_var(ncid, varid, namevar, &xxtype, &ndims, dimid, &natts));
    ncvar[varid].id = varid;
    ncvar[varid].type = xxtype;
    ncvar[varid].ndims = ndims;
    strcpy(ncvar[varid].name, namevar);
    memcpy(ncvar[varid].dimid, dimid, sizeof(int) * ndims);
    /* Walk this variable's attributes until the name inquiry fails. */
    for (attnum = 0; ;attnum++)
    {
      char name[NC_MAX_NAME + 1];
      if (nc_inq_attname(ncid, varid, attnum, name))
        break;
      nc_type xtype;
      XXnc(nc_inq_atttype(ncid, varid, name, &xtype));
      strcpy(ncvar[varid].att[attnum].name, name);
      ncvar[varid].att[attnum].type = xtype;
      if (xtype == NC_CHAR)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        char v[len + 1];
        v[len] = '\0';
        XXnc(nc_get_att_text(ncid, varid, name, v));
        ncvar[varid].att[attnum].len = len;
        strcpy(ncvar[varid].att[attnum].data, v);
      }
      else if (xtype == NC_DOUBLE)
      {
        /* Read but do not store (only NC_CHAR values are kept per-var). */
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        double v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_double(ncid, varid, name, &v[kk]));
        }
      }
      else if (xtype == NC_INT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        int v[len];
        int kk;
        XXnc(nc_get_att_int(ncid, varid, name, v));
        for(kk = 0; kk < len; kk++) {
          //printf("INT %d\n", v[kk]);
        }
      }
      else if (xtype == NC_FLOAT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        float v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_float(ncid, varid, name, &v[kk]));
        }
      }
      else
      {
        printf("null\n");
      }
    }
    ncvar[varid].attnum = attnum;
    /*------------------------
     * global att
     * ------------------------*/
    for (attnum = 0; ;attnum++)
    {
      char name[NC_MAX_NAME + 1];
      if (nc_inq_attname(ncid, NC_GLOBAL, attnum, name))
        break;
      nc_type xtype;
      XXnc(nc_inq_atttype(ncid, NC_GLOBAL, name, &xtype));
      strcpy(gatt[attnum].name, name);
      gatt[attnum].type = xtype;
      if (xtype == NC_CHAR)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, NC_GLOBAL, name, &len));
        char v[len + 1];
        v[len] = '\0';
        XXnc(nc_get_att_text(ncid, NC_GLOBAL, name, v));
        gatt[attnum].len = len;
        strcpy(gatt[attnum].data, v);
      }
      else if (xtype == NC_DOUBLE)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, NC_GLOBAL, name, &len));
        double v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_double(ncid, NC_GLOBAL, name, &v[kk]));
        }
        gatt[attnum].len = len;
        memcpy(gatt[attnum].datad, v, sizeof(double) * len);
      }
      else if (xtype == NC_INT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, NC_GLOBAL, name, &len));
        int v[len];
        int kk;
        XXnc(nc_get_att_int(ncid, NC_GLOBAL, name, v));
        /* NOTE(review): each element is re-fetched below even though the
           whole array was just read -- redundant but harmless. */
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_int(ncid, NC_GLOBAL, name, &v[kk]));
        }
        gatt[attnum].len = len;
        memcpy(gatt[attnum].datai, v, sizeof(int) * len);
      }
      else if (xtype == NC_FLOAT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, NC_GLOBAL, name, &len));
        float v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_float(ncid, NC_GLOBAL, name, &v[kk]));
        }
        gatt[attnum].len = len;
        memcpy(gatt[attnum].dataf, v, sizeof(float) * len);
      }
      else
      {
        printf("null\n");
      }
    }
    *gattnum = attnum;
  }
  nc_close(ncid);
}
/* Read the metadata (id, type, rank, name, dimension ids and attributes)
 * of all `nvars` variables from the first member file of the set
 * ("<name>.0000") into ncvar[].
 *
 * Only NC_CHAR attribute values are retained in att[].data; numeric
 * attributes are read and then discarded (mirrors GetHeadinfo; presumably
 * intentional, TODO confirm).  `dimlens` is accepted for interface
 * symmetry but not consulted.
 */
void GetVarinfo_all(int nvars, char *name, size_t *dimlens, Var *ncvar)
{
  int varid;
  int ncid;
  char filename[256];
  sprintf(filename, "%s.%04d", name, 0);
  XXnc(nc_open(filename, 0, &ncid));
  for(varid = 0; varid < nvars; varid++) {
    char namevar[NC_MAX_NAME + 1];
    nc_type xxtype;
    int ndims, natts, attnum, dimid[4];
    XXnc(nc_inq_var(ncid, varid, namevar, &xxtype, &ndims, dimid, &natts));
    ncvar[varid].id = varid;
    ncvar[varid].type = xxtype;
    ncvar[varid].ndims = ndims;
    memcpy(ncvar[varid].dimid, dimid, sizeof(int) * ndims);
    strcpy(ncvar[varid].name, namevar);
    for (attnum = 0; ;attnum++)
    {
      char name[NC_MAX_NAME + 1];
      if (nc_inq_attname(ncid, varid, attnum, name))
        break;
      nc_type xtype;
      XXnc(nc_inq_atttype(ncid, varid, name, &xtype));
      /* Fix: the original memcpy'd strlen(name) bytes, leaving the stored
       * attribute name without its NUL terminator; copy it as a string
       * (consistent with GetHeadinfo). */
      strcpy(ncvar[varid].att[attnum].name, name);
      ncvar[varid].att[attnum].type = xtype;
      if (xtype == NC_CHAR)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        char v[len + 1];
        v[len] = '\0';
        XXnc(nc_get_att_text(ncid, varid, name, v));
        ncvar[varid].att[attnum].len = len;
        strcpy(ncvar[varid].att[attnum].data, v);
      }
      else if (xtype == NC_DOUBLE)
      {
        /* Read but do not store (only NC_CHAR values are kept). */
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        double v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_double(ncid, varid, name, &v[kk]));
        }
      }
      else if (xtype == NC_INT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        int v[len];
        int kk;
        XXnc(nc_get_att_int(ncid, varid, name, v));
        for(kk = 0; kk < len; kk++) {
          //printf("INT %d\n", v[kk]);
        }
      }
      else if (xtype == NC_FLOAT)
      {
        size_t len;
        XXnc(nc_inq_attlen(ncid, varid, name, &len));
        float v[len];
        int kk;
        for(kk = 0; kk < len; kk++) {
          XXnc(nc_get_att_float(ncid, varid, name, &v[kk]));
        }
      }
      else
      {
        printf("null\n");
      }
    }
    ncvar[varid].attnum = attnum;
  }
  /* Fix: the original leaked the netCDF handle; close it before returning. */
  nc_close(ncid);
}
/* Fill ncvar[varid] (a Buf record) with the identity, type, rank,
 * dimension ids and name of variable `varid` in the open file `ncid`.
 * `dimlens` supplies the per-dimension lengths used for the size product.
 */
void GetBufinfo(int ncid, int varid, size_t *dimlens, Buf *ncvar)
{
  char namevar[NC_MAX_NAME + 1];
  nc_type xxtype;
  int ndims, natts, attnum, dimid[4];
  XXnc(nc_inq_var(ncid, varid, namevar, &xxtype, &ndims, dimid, &natts));
  ncvar[varid].id = varid;
  ncvar[varid].type = xxtype;
  ncvar[varid].ndims = ndims;
  memcpy(ncvar[varid].dimid, dimid, sizeof(int) * ndims);
  strcpy(ncvar[varid].name, namevar);
  int k;
  size_t varsize = 1;
  /* NOTE(review): varsize is computed but never stored or returned --
     presumably it was meant to be written into the Buf record; confirm. */
  for(k = 0; k < ncvar[varid].ndims; k++) {
    varsize *= dimlens[ncvar[varid].dimid[k]];
  }
}
/* Populate a single Var record with the identity, type, rank, dimension
 * ids and name of variable `varid` in the open file `ncid`.
 * `dimlens` is accepted for interface symmetry but not consulted. */
void GetVarinfo(int ncid, int varid, size_t *dimlens, Var *ncvar)
{
  char vname[NC_MAX_NAME + 1];
  nc_type vtype;
  int rank, attcount, dims[4];
  XXnc(nc_inq_var(ncid, varid, vname, &vtype, &rank, dims, &attcount));
  ncvar->id = varid;
  ncvar->ndims = rank;
  ncvar->type = vtype;
  strcpy(ncvar->name, vname);
  memcpy(ncvar->dimid, dims, sizeof(int) * rank);
}
/* Assemble one variable's local (per-rank) data block into the combined
 * global buffers.
 *
 * For each dimension of the variable, the dimension variable's
 * "domain_decomposition" attribute (four ints: global start, global end,
 * local start, local end) determines where the local block lands in the
 * full array; dimensions without that attribute are taken whole, with
 * their full length read from dimlensall[].  Variables of 1 to 4
 * dimensions are supported; more is a fatal error.
 *
 * NOTE(review): the source buffers (datac/datas/datai/dataf/datad) and the
 * destination buffers (alldata*) are file-scope globals assumed to be
 * filled/sized by the caller -- confirm against the reading code.
 * `varsize` is unused and kept only for interface compatibility.
 *
 * Fix: in the 4-D branch the OpenMP private clauses omitted `i` and `j`,
 * so the inner loop counters were shared across threads (a data race);
 * they are now listed as private.  Unused locals t1,t2,t3 were removed.
 */
void WriteData(int ncid, int varid, size_t varsize, Var ncvar, size_t *dimlens, size_t *dimlensall)
{
  size_t i, j, k, l, m, n, p, q;
  if(ncvar.ndims == 1) {
    char dimnames[256];
    int decomp[4];
    int dimvarid;
    size_t start, end, dimsize, dimfullsize;
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[0], dimnames));
    XXnc(nc_inq_varid(ncid, dimnames, &dimvarid));
    /* jj == 0 when the dimension carries a decomposition attribute. */
    int jj = nc_get_att(ncid, dimvarid, "domain_decomposition", (void *)decomp);
    if(jj == 0) {
      dimfullsize = decomp[1] - decomp[0] + 1;
      start = decomp[2] - decomp[0] + 1;
      end = decomp[3] - decomp[0] + 1;
      dimsize = end - start + 1;
    }
    else {
      start = 1;
      end = dimlensall[ncvar.dimid[0]];
      dimsize = end - start + 1;
    }
    switch (ncvar.type) {
    case NC_BYTE:
    case NC_CHAR:
#pragma omp parallel for
      for(i = 0; i < dimsize; i++) {
        alldatac[i + start - 1] = datac[i];
      }
      break;
    case NC_SHORT:
#pragma omp parallel for
      for(i = 0; i < dimsize; i++) {
        alldatas[i + start - 1] = datas[i];
      }
      break;
    case NC_INT:
#pragma omp parallel for
      for(i = 0; i < dimsize; i++) {
        alldatai[i + start - 1] = datai[i];
      }
      break;
    case NC_FLOAT:
#pragma omp parallel for
      for(i = 0; i < dimsize; i++) {
        alldataf[i + start - 1] = dataf[i];
      }
      break;
    case NC_DOUBLE:
#pragma omp parallel for
      for(i = 0; i < dimsize; i++) {
        alldatad[i + start - 1] = datad[i];
      }
      break;
    }
  } else if(ncvar.ndims == 2) {
    char dimnames[2][256];
    int dimvarid[2];
    size_t start[2], end[2], dimsize[2], dimfullsize[2];
    int decomp[2][4];
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[0], dimnames[0]));
    XXnc(nc_inq_varid(ncid, dimnames[0], &dimvarid[0]));
    int ii = nc_get_att(ncid, dimvarid[0], "domain_decomposition", (void *)decomp[0]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[1], dimnames[1]));
    XXnc(nc_inq_varid(ncid, dimnames[1], &dimvarid[1]));
    int jj = nc_get_att(ncid, dimvarid[1], "domain_decomposition", (void *)decomp[1]);
    if(ii == 0) {
      dimfullsize[0] = decomp[0][1] - decomp[0][0] + 1;
      start[0] = decomp[0][2] - decomp[0][0] + 1;
      end[0] = decomp[0][3] - decomp[0][0] + 1;
      dimsize[0] = end[0] - start[0] + 1;
    }
    else {
      start[0] = 1;
      end[0] = dimlensall[ncvar.dimid[0]];
      dimsize[0] = end[0] - start[0] + 1;
      dimfullsize[0] = dimsize[0];
    }
    if(jj == 0) {
      dimfullsize[1] = decomp[1][1] - decomp[1][0] + 1;
      start[1] = decomp[1][2] - decomp[1][0] + 1;
      end[1] = decomp[1][3] - decomp[1][0] + 1;
      dimsize[1] = end[1] - start[1] + 1;
    }
    else {
      start[1] = 1;
      end[1] = dimlensall[ncvar.dimid[1]];
      dimsize[1] = end[1] - start[1] + 1;
      dimfullsize[1] = dimsize[1];
    }
    /* Row-major copy: dim 0 is the slow axis, dim 1 the fast axis. */
    switch (ncvar.type) {
    case NC_BYTE:
    case NC_CHAR:
#pragma omp parallel for private(i, j, k, l, m)
      for(j = 0; j < dimsize[0]; j++) {
        for(i = 0; i < dimsize[1]; i++) {
          l = j + start[0] - 1;
          k = i + start[1] - 1;
          m = l * dimfullsize[1] + k;
          alldatac[m] = datac[j * dimsize[1] + i];
        }
      }
      break;
    case NC_SHORT:
#pragma omp parallel for private(i, j, k, l, m)
      for(j = 0; j < dimsize[0]; j++) {
        for(i = 0; i < dimsize[1]; i++) {
          l = j + start[0] - 1;
          k = i + start[1] - 1;
          m = l * dimfullsize[1] + k;
          alldatas[m] = datas[j * dimsize[1] + i];
        }
      }
      break;
    case NC_INT:
#pragma omp parallel for private(i, j, k, l, m)
      for(j = 0; j < dimsize[0]; j++) {
        for(i = 0; i < dimsize[1]; i++) {
          l = j + start[0] - 1;
          k = i + start[1] - 1;
          m = l * dimfullsize[1] + k;
          alldatai[m] = datai[j * dimsize[1] + i];
        }
      }
      break;
    case NC_FLOAT:
#pragma omp parallel for private(i, j, k, l, m)
      for(j = 0; j < dimsize[0]; j++) {
        for(i = 0; i < dimsize[1]; i++) {
          l = j + start[0] - 1;
          k = i + start[1] - 1;
          m = l * dimfullsize[1] + k;
          alldataf[m] = dataf[j * dimsize[1] + i];
        }
      }
      break;
    case NC_DOUBLE:
#pragma omp parallel for private(i, j, k, l, m)
      for(j = 0; j < dimsize[0]; j++) {
        for(i = 0; i < dimsize[1]; i++) {
          l = j + start[0] - 1;
          k = i + start[1] - 1;
          m = l * dimfullsize[1] + k;
          alldatad[m] = datad[j * dimsize[1] + i];
        }
      }
      break;
    }
  } else if (ncvar.ndims == 3) {
    char dimnames[3][256];
    int dimvarid[3];
    size_t start[3], end[3], dimsize[3], dimfullsize[3];
    int decomp[3][4];
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[0], dimnames[0]));
    XXnc(nc_inq_varid(ncid, dimnames[0], &dimvarid[0]));
    int ii = nc_get_att(ncid, dimvarid[0], "domain_decomposition", (void *)decomp[0]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[1], dimnames[1]));
    XXnc(nc_inq_varid(ncid, dimnames[1], &dimvarid[1]));
    int jj = nc_get_att(ncid, dimvarid[1], "domain_decomposition", (void *)decomp[1]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[2], dimnames[2]));
    XXnc(nc_inq_varid(ncid, dimnames[2], &dimvarid[2]));
    int kk = nc_get_att(ncid, dimvarid[2], "domain_decomposition", (void *)decomp[2]);
    if(ii == 0) {
      dimfullsize[0] = decomp[0][1] - decomp[0][0] + 1;
      start[0] = decomp[0][2] - decomp[0][0] + 1;
      end[0] = decomp[0][3] - decomp[0][0] + 1;
      dimsize[0] = end[0] - start[0] + 1;
    }
    else {
      start[0] = 1;
      end[0] = dimlensall[ncvar.dimid[0]];
      dimsize[0] = end[0] - start[0] + 1;
      dimfullsize[0] = dimsize[0];
    }
    if(jj == 0) {
      dimfullsize[1] = decomp[1][1] - decomp[1][0] + 1;
      start[1] = decomp[1][2] - decomp[1][0] + 1;
      end[1] = decomp[1][3] - decomp[1][0] + 1;
      dimsize[1] = end[1] - start[1] + 1;
    }
    else {
      start[1] = 1;
      end[1] = dimlensall[ncvar.dimid[1]];
      dimsize[1] = end[1] - start[1] + 1;
      dimfullsize[1] = dimsize[1];
    }
    if(kk == 0) {
      dimfullsize[2] = decomp[2][1] - decomp[2][0] + 1;
      start[2] = decomp[2][2] - decomp[2][0] + 1;
      end[2] = decomp[2][3] - decomp[2][0] + 1;
      dimsize[2] = end[2] - start[2] + 1;
    }
    else {
      start[2] = 1;
      end[2] = dimlensall[ncvar.dimid[2]];
      dimsize[2] = end[2] - start[2] + 1;
      dimfullsize[2] = dimsize[2];
    }
    /* Precomputed strides: jks = plane, ks = row (local / full arrays). */
    size_t jks = dimsize[2] * dimsize[1];
    size_t jksall = dimfullsize[2] * dimfullsize[1];
    size_t ks = dimsize[2];
    size_t ksall = dimfullsize[2];
    switch (ncvar.type) {
    case NC_BYTE:
    case NC_CHAR:
#pragma omp parallel for private(i, j, k, l, m, n)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            m = k + start[0] - 1;
            n = j + start[1] - 1;
            l = i + start[2] - 1;
            alldatac[m * jksall + n * ksall + l] = datac[k * jks + j * ks + i];
          }
        }
      }
      break;
    case NC_SHORT:
#pragma omp parallel for private(i, j, k, l, m, n)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            m = k + start[0] - 1;
            n = j + start[1] - 1;
            l = i + start[2] - 1;
            alldatas[m * jksall + n * ksall + l] = datas[k * jks + j * ks + i];
          }
        }
      }
      break;
    case NC_INT:
#pragma omp parallel for private(i, j, k, l, m, n)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            m = k + start[0] - 1;
            n = j + start[1] - 1;
            l = i + start[2] - 1;
            alldatai[m * jksall + n * ksall + l] = datai[k * jks + j * ks + i];
          }
        }
      }
      break;
    case NC_FLOAT:
#pragma omp parallel for private(i, j, k, l, m, n)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            m = k + start[0] - 1;
            n = j + start[1] - 1;
            l = i + start[2] - 1;
            alldataf[m * jksall + n * ksall + l] = dataf[k * jks + j * ks + i];
          }
        }
      }
      break;
    case NC_DOUBLE:
#pragma omp parallel for private(i, j, k, l, m, n)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            m = k + start[0] - 1;
            n = j + start[1] - 1;
            l = i + start[2] - 1;
            alldatad[m * jksall + n * ksall + l] = datad[k * jks + j * ks + i];
          }
        }
      }
      break;
    }
  } else if (ncvar.ndims == 4){
    char dimnames[4][256];
    int dimvarid[4];
    size_t start[4], end[4], dimsize[4], dimfullsize[4];
    int decomp[4][4];
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[0], dimnames[0]));
    XXnc(nc_inq_varid(ncid, dimnames[0], &dimvarid[0]));
    int ii = nc_get_att(ncid, dimvarid[0], "domain_decomposition", (void *)decomp[0]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[1], dimnames[1]));
    XXnc(nc_inq_varid(ncid, dimnames[1], &dimvarid[1]));
    int jj = nc_get_att(ncid, dimvarid[1], "domain_decomposition", (void *)decomp[1]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[2], dimnames[2]));
    XXnc(nc_inq_varid(ncid, dimnames[2], &dimvarid[2]));
    int kk = nc_get_att(ncid, dimvarid[2], "domain_decomposition", (void *)decomp[2]);
    XXnc(nc_inq_dimname(ncid, ncvar.dimid[3], dimnames[3]));
    XXnc(nc_inq_varid(ncid, dimnames[3], &dimvarid[3]));
    int ll = nc_get_att(ncid, dimvarid[3], "domain_decomposition", (void *)decomp[3]);
    if(ii == 0) {
      dimfullsize[0] = decomp[0][1] - decomp[0][0] + 1;
      start[0] = decomp[0][2] - decomp[0][0] + 1;
      end[0] = decomp[0][3] - decomp[0][0] + 1;
      dimsize[0] = end[0] - start[0] + 1;
    }
    else {
      start[0] = 1;
      end[0] = dimlensall[ncvar.dimid[0]];
      dimsize[0] = end[0] - start[0] + 1;
      dimfullsize[0] = dimsize[0];
    }
    if(jj == 0) {
      dimfullsize[1] = decomp[1][1] - decomp[1][0] + 1;
      start[1] = decomp[1][2] - decomp[1][0] + 1;
      end[1] = decomp[1][3] - decomp[1][0] + 1;
      dimsize[1] = end[1] - start[1] + 1;
    }
    else {
      start[1] = 1;
      end[1] = dimlensall[ncvar.dimid[1]];
      dimsize[1] = end[1] - start[1] + 1;
      dimfullsize[1] = dimsize[1];
    }
    if(kk == 0) {
      dimfullsize[2] = decomp[2][1] - decomp[2][0] + 1;
      start[2] = decomp[2][2] - decomp[2][0] + 1;
      end[2] = decomp[2][3] - decomp[2][0] + 1;
      dimsize[2] = end[2] - start[2] + 1;
    }
    else {
      start[2] = 1;
      end[2] = dimlensall[ncvar.dimid[2]];
      dimsize[2] = end[2] - start[2] + 1;
      dimfullsize[2] = dimsize[2];
    }
    if(ll == 0) {
      dimfullsize[3] = decomp[3][1] - decomp[3][0] + 1;
      start[3] = decomp[3][2] - decomp[3][0] + 1;
      end[3] = decomp[3][3] - decomp[3][0] + 1;
      dimsize[3] = end[3] - start[3] + 1;
    }
    else {
      start[3] = 1;
      end[3] = dimlensall[ncvar.dimid[3]];
      dimsize[3] = end[3] - start[3] + 1;
      dimfullsize[3] = dimsize[3];
    }
    /* Precomputed strides for the 4-D row-major layout. */
    size_t jils = dimsize[3] * dimsize[2] * dimsize[1];
    size_t jilsall = dimfullsize[3] * dimfullsize[2] * dimfullsize[1];
    size_t ils = dimsize[3] * dimsize[2];
    size_t ilsall = dimfullsize[3] * dimfullsize[2];
    size_t ls = dimsize[3];
    size_t lsall = dimfullsize[3];
    /* Fix: i and j added to every private clause below -- they are inner
       loop counters and were racing when shared between threads. */
    switch (ncvar.type) {
    case NC_BYTE:
    case NC_CHAR:
#pragma omp parallel for private(i, j, k, l, m, n, p, q)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            for(l = 0; l < dimsize[3]; l++) {
              m = k + start[0] - 1;
              n = j + start[1] - 1;
              p = i + start[2] - 1;
              q = l + start[3] - 1;
              alldatac[m * jilsall + n * ilsall + p * lsall + q] = datac[k * jils + j * ils + i * ls + l];
            }
          }
        }
      }
      break;
    case NC_SHORT:
#pragma omp parallel for private(i, j, k, l, m, n, p, q)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            for(l = 0; l < dimsize[3]; l++) {
              m = k + start[0] - 1;
              n = j + start[1] - 1;
              p = i + start[2] - 1;
              q = l + start[3] - 1;
              alldatas[m * jilsall + n * ilsall + p * lsall + q] = datas[k * jils + j * ils + i * ls + l];
            }
          }
        }
      }
      break;
    case NC_INT:
#pragma omp parallel for private(i, j, k, l, m, n, p, q)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            for(l = 0; l < dimsize[3]; l++) {
              m = k + start[0] - 1;
              n = j + start[1] - 1;
              p = i + start[2] - 1;
              q = l + start[3] - 1;
              alldatai[m * jilsall + n * ilsall + p * lsall + q] = datai[k * jils + j * ils + i * ls + l];
            }
          }
        }
      }
      break;
    case NC_FLOAT:
#pragma omp parallel for private(i, j, k, l, m, n, p, q)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            for(l = 0; l < dimsize[3]; l++) {
              m = k + start[0] - 1;
              n = j + start[1] - 1;
              p = i + start[2] - 1;
              q = l + start[3] - 1;
              alldataf[m * jilsall + n * ilsall + p * lsall + q] = dataf[k * jils + j * ils + i * ls + l];
            }
          }
        }
      }
      break;
    case NC_DOUBLE:
#pragma omp parallel for private(i, j, k, l, m, n, p, q)
      for(k = 0; k < dimsize[0]; k++) {
        for(j = 0; j < dimsize[1]; j++) {
          for(i = 0; i < dimsize[2]; i++) {
            for(l = 0; l < dimsize[3]; l++) {
              m = k + start[0] - 1;
              n = j + start[1] - 1;
              p = i + start[2] - 1;
              q = l + start[3] - 1;
              alldatad[m * jilsall + n * ilsall + p * lsall + q] = datad[k * jils + j * ils + i * ls + l];
            }
          }
        }
      }
      break;
    }
  } else {
    printf("ERROR: dims > 4\n");
    exit(-1);
  }
}
|
hello_omp.c | /* Hello World OpenMP
*
* Compile on Triton as:
* gcc -fopenmp hello_omp.c -o hello_omp
*
* degtyai1, Wed, 28 May 2014 12:47:47 +0300
* tuomiss1, Mon, 08 Jun 2020
*
*/
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
/* Entry point: when compiled with OpenMP (-fopenmp) every thread in the
 * parallel team prints a greeting tagged with its thread id (output order
 * is nondeterministic); otherwise a single serial greeting is printed. */
int main(void) {
#if defined(_OPENMP)
#pragma omp parallel
  printf("Hello, world from thread %d.\n", omp_get_thread_num());
#else
  printf("Hello, world.\n");
#endif
  return 0;
}
|
parallel_for.h | /*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <vector>
#include "aligned_allocator.h"
#include "nn_error.h"
#include "tiny_dnn/config.h"
#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif
#if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD)
#include <future>
#include <thread>
#endif
#if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD)
#include <dispatch/dispatch.h>
#endif
namespace tiny_dnn {
#ifdef CNN_USE_TBB
static tbb::task_scheduler_init tbbScheduler(
tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred);
typedef tbb::blocked_range<size_t> blocked_range;
// TBB backend: parallel loop over [begin, end).
// NOTE(review): the grain expression looks inverted -- when the range is
// larger than grainsize the grain is `grainsize`, otherwise 1; presumably
// `std::min(end - begin, grainsize)` was intended.  Behavior is still
// correct (1 just means maximally fine-grained); confirm intent.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
  assert(end >= begin);
  tbb::parallel_for(
    blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}
// Serial reference version (TBB build): invoke f once with the whole
// [begin, end) range (grain 100 is irrelevant for a single call).
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
  f(blocked_range(begin, end, 100));
}
#else
// Minimal stand-in for tbb::blocked_range: an immutable half-open index
// interval [begin, end) handed to loop bodies.
struct blocked_range {
  typedef size_t const_iterator;

  blocked_range(size_t begin, size_t end) : first_(begin), last_(end) {}
  // Convenience overload so callers may pass plain ints.
  blocked_range(int begin, int end)
      : first_(static_cast<size_t>(begin)), last_(static_cast<size_t>(end)) {}

  // First index of the range.
  const_iterator begin() const { return first_; }
  // One past the last index.
  const_iterator end() const { return last_; }

 private:
  size_t first_;
  size_t last_;
};
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
blocked_range r(begin, end);
f(r);
}
#if defined(CNN_USE_OMP)
// OpenMP backend: one parallel loop where each iteration receives a
// single-element range [i, i+1).  grainsize is ignored.
// NOTE(review): an unsigned loop variable under `omp parallel for`
// requires OpenMP >= 3.0 -- confirm the minimum supported compiler.
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  assert(end >= begin);
#pragma omp parallel for
  for (size_t i = begin; i < end; ++i) f(blocked_range(i, i + 1));
}
#elif defined(CNN_USE_GCD)
// Grand Central Dispatch backend: split [begin, end) into blocks of
// `grainsize` elements (minimum 1) and run them on the default-QoS global
// queue; dispatch_apply blocks until all iterations finish.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
  assert(end >= begin);
  size_t count = end - begin;
  size_t blockSize = grainsize;
  if (count < blockSize || blockSize == 0) {
    blockSize = 1;
  }
  size_t blockCount = (count + blockSize - 1) / blockSize;
  assert(blockCount > 0);
  dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
                 ^(size_t block) {
                   // Fix: block offsets must be rebased onto `begin`; the
                   // original started every range at 0, producing wrong
                   // ranges whenever begin != 0.
                   size_t blockStart = begin + block * blockSize;
                   size_t blockEnd = blockStart + blockSize;
                   if (blockEnd > end) {
                     blockEnd = end;
                   }
                   assert(blockStart < blockEnd);
                   f(blocked_range(blockStart, blockEnd));
                 });
}
#elif defined(CNN_SINGLE_THREAD)
// Single-threaded build: degrade to the serial driver; grainsize ignored.
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  xparallel_for(begin, end, f);
}
#else
// std::async backend: split [begin, end) into one chunk per hardware
// thread, run each chunk as an async task, and join them all before
// returning.  grainsize is ignored.
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  assert(end >= begin);
  // Fix: nothing to do for an empty range (the original still issued one
  // task with an empty range).
  if (begin == end) return;
  size_t nthreads = std::thread::hardware_concurrency();
  // Fix: hardware_concurrency() may return 0 ("unknown"); the original
  // then divided by zero.  Fall back to a single chunk.
  if (nthreads == 0) nthreads = 1;
  size_t blockSize = (end - begin) / nthreads;
  // Round up so nthreads chunks always cover the whole range.
  if (blockSize * nthreads < end - begin) blockSize++;
  std::vector<std::future<void> > futures;
  size_t blockBegin = begin;
  size_t blockEnd = blockBegin + blockSize;
  if (blockEnd > end) blockEnd = end;
  for (size_t i = 0; i < nthreads; i++) {
    // Capture the bounds by value: they change on the next iteration.
    futures.push_back(
      std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] {
        f(blocked_range(blockBegin, blockEnd));
      })));
    blockBegin += blockSize;
    blockEnd = blockBegin + blockSize;
    if (blockBegin >= end) break;
    if (blockEnd > end) blockEnd = end;
  }
  for (auto &future : futures) future.wait();
}
#endif
#endif // CNN_USE_TBB
// True when `value` survives a round-trip through type T, i.e. T can
// represent it exactly.
template <typename T, typename U>
bool value_representation(U const &value) {
  const T narrowed = static_cast<T>(value);
  return static_cast<U>(narrowed) == value;
}
// Dispatch for *unsigned* end types: parallelize only when `end` fits in
// size_t without loss (checked via value_representation), otherwise run
// serially over the truncated bound.
template <typename T, typename Func>
inline void for_(std::true_type,
                 bool parallelize,
                 size_t begin,
                 T end,
                 Func f,
                 int grainsize = 100) {
  parallelize = parallelize && value_representation<size_t>(end);
  parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
              : xparallel_for(begin, static_cast<size_t>(end), f);
}
// Dispatch for *signed* end types: no representability check is needed.
// NOTE(review): unlike the unsigned overload, the serial branch passes
// `end` without an explicit cast; xparallel_for takes size_t, so a
// negative `end` would wrap -- confirm callers guarantee end >= 0.
template <typename T, typename Func>
inline void for_(std::false_type,
                 bool parallelize,
                 size_t begin,
                 T end,
                 Func f,
                 int grainsize = 100) {
  parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
              : xparallel_for(begin, end, f);
}
// Public entry: statically dispatch on the signedness of T so the unsigned
// overload can guard against `end` values not representable in size_t.
template <typename T, typename Func>
inline void for_(
  bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) {
  static_assert(std::is_integral<T>::value, "end must be integral type");
  for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f,
       grainsize);
}
// Apply f(i) for every i in [0, size), optionally in parallel.
// NOTE(review): when CNN_USE_OMP is defined the inner `omp parallel for`
// executes inside the parallel region already created by parallel_for
// (which hands each lambda a width-1 range), i.e. nested parallelism --
// confirm this is intended.
template <typename T, typename Func>
void for_i(bool parallelize, T size, Func f, size_t grainsize = 100) {
#ifdef CNN_SINGLE_THREAD
  parallelize = false;  // single-thread builds always run serially
#endif
  for_(parallelize, 0, size,
       [&](const blocked_range &r) {
#ifdef CNN_USE_OMP
#pragma omp parallel for
#endif
         for (size_t i = r.begin(); i < r.end(); i++) f(i);
       },
       grainsize);
}
// Convenience overload: parallelize by default.
template <typename T, typename Func>
void for_i(T size, Func f, size_t grainsize = 100) {
  for_i(true, size, f, grainsize);
}
} // namespace tiny_dnn
|
omp-for-decrement.c | #include <stdio.h>
/* Exercises OpenMP parallel-for loops with decrementing induction
 * variables: unit and non-unit negative strides, with bounds ending at 0
 * and at -1.  Each loop prints its iterations (ordering across threads is
 * nondeterministic). */
int main() {
  int i,j;
  printf("loop 1: stride of -1, 10 - 0\n");
#pragma omp parallel for
  for(i = 10; i > 0; i--)
  {
    printf("Hello world %d\n",i);
  }
  printf("loop 2: stride of -1, 10 - -1\n");
#pragma omp parallel for
  for(i = 10; i > -1; i--)
  {
    printf("Hello world %d\n",i);
  }
  printf("loop 3: stride of -2, 10 - 0\n");
#pragma omp parallel for
  for(i = 10; i > 0; i -= 2)
  {
    printf("Hello world %d\n",i);
  }
  printf("loop 4: stride of -2, 10 - -1\n");
#pragma omp parallel for
  for(i = 10; i > -1; i -= 2)
  {
    printf("Hello world %d\n",i);
  }
  /* Cases still to cover: */
  //stride larger than range
  //range less than num_threads
  //
  return 0;
}
|
critical.c | #include <omp.h>
#include <stdio.h>
/* Demonstrates #pragma omp critical: every thread in the team increments
 * the shared counter x inside a critical section, so no increment is lost
 * and the final value equals the team size. */
int main(void)
{
  int x;
  x = 0;
#pragma omp parallel shared(x)
  {
    /* Serialize the read-modify-write so increments are not lost. */
#pragma omp critical
    x = x + 1;
  } /* end of parallel section */
  printf("out of the parallel region : X = %d\n",x);
  /* Fix: implicit-int main() is invalid since C99; declare int and return. */
  return 0;
}
|
gemv_x_coo.c | #include "alphasparse/kernel.h"
#include "alphasparse/kernel_plain.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* y := alpha*A*x + beta*y for a COO-format sparse matrix, using OpenMP.
 *
 * Concurrent writes to y are avoided by giving every thread a private
 * dense accumulator row tmp[tid] of length m; after the nnz loop the
 * per-thread rows are reduced into y together with the beta scaling.
 *
 * NOTE(review): the malloc results are unchecked, matching the original
 * code -- confirm the project's allocation-failure policy.
 */
static alphasparse_status_t
gemv_coo_omp(const ALPHA_Number alpha,
             const ALPHA_SPMAT_COO *A,
             const ALPHA_Number *x,
             const ALPHA_Number beta,
             ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT nnz = A->nnz;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < nnz; i++)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT r = A->row_indx[i];
        const ALPHA_INT c = A->col_indx[i];
        ALPHA_Number v;
        alpha_mul(v, A->values[i], x[c]);        /* v = A_ij * x_j */
        alpha_madde(tmp[threadId][r], alpha, v); /* tmp[tid][r] += alpha * v */
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]); /* y_i *= beta */
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }
    /* Fix: the original leaked tmp and every per-thread row. */
    for (int i = 0; i < thread_num; ++i)
        free(tmp[i]);
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point: y := alpha*A*x + beta*y for a COO sparse matrix.
 * Delegates to the OpenMP implementation.  (The unused thread_num local
 * present in the original was removed.) */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_COO *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return gemv_coo_omp(alpha, A, x, beta, y);
}
|
GB_unop__bnot_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int64_int64)
// op(A') function: GB (_unop_tran__bnot_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = ~aij elementwise: Cx [p] = ~(Ax [p]) for all anz entries,
// split statically across nthreads OpenMP threads.  When Ab is non-NULL
// (bitmap case) entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__bnot_int64_int64)
(
    int64_t *Cx,        // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ~(A'): transpose, typecast and apply.  The real work is done by the
// shared template GB_unop_transpose.c, which is driven by the GB_CAST_OP
// macro defined above.
GrB_Info GB (_unop_tran__bnot_int64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_matvec.c | /******************************************************************************
* OpenMP Example - Matrix-vector multiplication - C/C++ Version
* FILE: omp_matvec.c
* DESCRIPTION:
* This example multiplies all row i elements of matrix A with vector
* element b(i) and stores the summed products in vector c(i). A total is
* maintained for the entire matrix. Performed by using the OpenMP loop
* work-sharing construct. The update of the shared global total is
* serialized by using the OpenMP critical directive.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#define SIZE 10
/* Multiply each row i of matrix A elementwise by b[i], accumulate the row
 * sums into c[i], and maintain a running grand total over the whole
 * matrix.  Rows are distributed to OpenMP threads with a work-sharing
 * loop; the shared total update is serialized with a critical section.
 * Fix: implicit-int `main ()` is invalid since C99; declare int. */
int main ()
{
float A[SIZE][SIZE], b[SIZE], c[SIZE], total;
int i, j, tid;

/* Initializations */
total = 0.0;
for (i=0; i < SIZE; i++)
  {
  for (j=0; j < SIZE; j++)
    A[i][j] = (j+1) * 1.0;
  b[i] = 1.0 * (i+1);
  c[i] = 0.0;
  }
printf("\nStarting values of matrix A and vector b:\n");
for (i=0; i < SIZE; i++)
  {
  printf("  A[%d]= ",i);
  for (j=0; j < SIZE; j++)
    printf("%.1f ",A[i][j]);
  printf("  b[%d]= %.1f\n",i,b[i]);
  }
printf("\nResults by thread/row:\n");

/* Create a team of threads and scope variables */
#pragma omp parallel shared(A,b,c,total) private(tid,i)
  {
  tid = omp_get_thread_num();
  /* Loop work-sharing construct - distribute rows of matrix */
#pragma omp for private(j)
  for (i=0; i < SIZE; i++)
    {
    for (j=0; j < SIZE; j++)
      c[i] += (A[i][j] * b[i]);
    /* Update and display of running total must be serialized */
#pragma omp critical
      {
      total = total + c[i];
      printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]);
      printf("Running total= %.2f\n",total);
      }
    }   /* end of parallel i loop */
  }   /* end of parallel construct */
printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total);
return 0;
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
// Destination node plus edge weight and insertion timestamp; paired with a
// source node it forms a weighted temporal edge.
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;
  WeightT_ w;
  uint64_t t;  // timestamp when this edge inserted
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1), t(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w), t(1) {}
  NodeWeight(NodeID_ v, WeightT_ w, uint64_t t) : v(v), w(w), t(t) {}

  // Order by destination first, then weight; the timestamp is ignored.
  bool operator< (const NodeWeight& rhs) const {
    if (v == rhs.v)
      return w < rhs.w;
    return v < rhs.v;
  }

  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }

  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }

  // Implicit conversion to the bare destination id.
  operator NodeID_() {
    return v;
  }
};
// Write a NodeWeight as "v w t" (space-separated, no trailing newline).
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w << " " << nw.t;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w >> nw.t;
return is;
}
// Syntactic sugar for an edge (u -> v); DstT may differ from SrcT.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u; // source endpoint
DstT v; // destination endpoint
// Default constructor leaves members uninitialized — presumably to keep bulk
// container resizes cheap (TODO confirm against Builder usage).
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph; these fixed-width types define the on-disk format
// used when writing a graph to file.
typedef int32_t SGID;            // serialized node id
typedef EdgePair<SGID> SGEdge;   // serialized (unweighted) edge
typedef int64_t SGOffset;        // serialized neighbor-list offset
/**
 * Simple container for a graph in CSR format (GAP Benchmark Suite).
 * - Intended to be constructed by a Builder
 * - To make weighted, set DestID_ template type to NodeWeight
 * - MakeInverse parameter controls whether graph stores its inverse
 * Owns its four raw arrays (new[]-allocated); declaring the move operations
 * implicitly suppresses copying, so the graph is move-only.
 */
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  // Used to access neighbors of vertex, basically sugar for iterators
  class Neighborhood {
    NodeID_ n_;
    DestID_** g_index_;
    OffsetT start_offset_;
   public:
    Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
        n_(n), g_index_(g_index), start_offset_(0) {
      OffsetT max_offset = end() - begin();
      // Clamp the requested offset so begin() can never run past end().
      start_offset_ = std::min(start_offset, max_offset);
    }
    typedef DestID_* iterator;
    iterator begin() { return g_index_[n_] + start_offset_; }
    iterator end()   { return g_index_[n_+1]; }
  };

  // Frees the owned arrays. delete[] on nullptr is a no-op, so the former
  // explicit null checks were redundant and have been removed. For
  // undirected graphs in_* alias out_*, hence the directed_ guard prevents
  // a double free.
  void ReleaseResources() {
    delete[] out_index_;
    delete[] out_neighbors_;
    if (directed_) {
      delete[] in_index_;
      delete[] in_neighbors_;
    }
  }

 public:
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
    out_index_(nullptr), out_neighbors_(nullptr),
    in_index_(nullptr), in_neighbors_(nullptr) {}

  // Undirected constructor: in_* alias out_* (freed only once, see
  // ReleaseResources); each undirected edge occupies two slots, hence /2.
  CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
    directed_(false), num_nodes_(num_nodes),
    out_index_(index), out_neighbors_(neighs),
    in_index_(index), in_neighbors_(neighs) {
      num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
  }

  // Directed constructor: takes ownership of separate out- and in-arrays.
  CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
        DestID_** in_index, DestID_* in_neighs) :
    directed_(true), num_nodes_(num_nodes),
    out_index_(out_index), out_neighbors_(out_neighs),
    in_index_(in_index), in_neighbors_(in_neighs) {
      num_edges_ = out_index_[num_nodes_] - out_index_[0];
  }

  // Move constructor: steals other's arrays. noexcept so standard containers
  // can move rather than copy during reallocation.
  CSRGraph(CSRGraph&& other) noexcept : directed_(other.directed_),
    num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
    out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
    in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  // Move assignment: frees the current arrays, then steals other's.
  CSRGraph& operator=(CSRGraph&& other) noexcept {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      out_index_ = other.out_index_;
      out_neighbors_ = other.out_neighbors_;
      in_index_ = other.in_index_;
      in_neighbors_ = other.in_neighbors_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
    }
    return *this;
  }

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  int64_t num_edges() const {
    return num_edges_;
  }

  // Undirected edges count once in num_edges_ but are stored twice.
  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  int64_t out_degree(NodeID_ v) const {
    return out_index_[v+1] - out_index_[v];
  }

  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return in_index_[v+1] - in_index_[v];
  }

  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood(n, out_index_, start_offset);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, start_offset);
  }

  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  void PrintTopology() const {
    for (NodeID_ i=0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }

  // Builds the per-vertex index array from prefix-sum offsets into neighs.
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Recomputes prefix-sum offsets from the (in- or out-) index array.
  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    pvector<SGOffset> offsets(num_nodes_+1);
    for (NodeID_ n=0; n < num_nodes_+1; n++)
      if (in_graph)
        offsets[n] = in_index_[n] - in_index_[0];
      else
        offsets[n] = out_index_[n] - out_index_[0];
    return offsets;
  }

  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 private:
  bool directed_;       // true iff in_* arrays are distinct from out_*
  int64_t num_nodes_;
  int64_t num_edges_;
  DestID_** out_index_;     // per-vertex pointers into out_neighbors_
  DestID_* out_neighbors_;
  DestID_** in_index_;      // aliases out_index_ when undirected
  DestID_* in_neighbors_;   // aliases out_neighbors_ when undirected
};
#endif // GRAPH_H_
|
merge-sort-parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define SIZE 1000000
/*
 * Merge the two sorted runs x[p..q] and x[q+1..r] back into x[p..r].
 * Fixes the original implementation, which stored sentinel values one past
 * the end of its VLAs (L[n1] and R[n2] were out-of-bounds writes, UB) and
 * kept up to ~2 MB of temporaries on the stack for SIZE-sized runs. The
 * rewrite needs no sentinels and allocates the temporaries on the heap.
 * On allocation failure x is left untouched.
 */
void merge(int *x, int p, int q, int r) {
    int n1 = q - p + 1;  /* length of left run  x[p..q]   */
    int n2 = r - q;      /* length of right run x[q+1..r] */
    int *L = (int*)malloc(n1 * sizeof(int));
    int *R = (int*)malloc(n2 * sizeof(int));
    int i, j, k;
    if (L == NULL || R == NULL) {
        free(L);
        free(R);
        return;
    }
    for (i = 0; i < n1; i++)
        L[i] = x[p + i];
    for (j = 0; j < n2; j++)
        R[j] = x[q + 1 + j];
    /* Standard two-finger merge; <= keeps the sort stable. */
    i = j = 0;
    k = p;
    while (i < n1 && j < n2)
        x[k++] = (L[i] <= R[j]) ? L[i++] : R[j++];
    while (i < n1)
        x[k++] = L[i++];
    while (j < n2)
        x[k++] = R[j++];
    free(L);
    free(R);
}
/*
 * Parallel merge sort of x[p..r] (inclusive bounds) using OpenMP tasks.
 * Each half is sorted in its own task; taskwait guarantees both halves are
 * finished before they are merged. The midpoint is computed as
 * p + (r - p) / 2 instead of (p + r) / 2 to avoid signed overflow for
 * large indices.
 */
void merge_sort(int *x, int p, int r) {
    if (p < r) {
        int q = p + (r - p) / 2;
        #pragma omp task firstprivate(x, p, q)
        merge_sort(x, p, q);
        #pragma omp task firstprivate(x, q, r)
        merge_sort(x, q + 1, r);
        #pragma omp taskwait  /* both child tasks must complete first */
        merge(x, p, q, r);
    }
}
/* Print the first n entries of x, tab-separated, terminated by a newline. */
void print_list(int *x, int n) {
    int idx = 0;
    while (idx < n) {
        printf("%d\t", x[idx]);
        ++idx;
    }
    printf("\n");
}
/*
 * Return the largest value in arr[0..size-1].
 * Fix: the original seeded its running maximum with -1, which returned the
 * wrong answer whenever every element was below -1. Seed with the first
 * element instead. Returns -1 for size <= 0, matching the original's
 * empty-array result.
 */
double find_max(double arr[], int size) {
    int i;
    double max;
    if (size <= 0)
        return -1;
    max = arr[0];
    for (i = 1; i < size; i++)
        if (arr[i] > max)
            max = arr[i];
    return max;
}
/* Return the smallest value in arr[0..size-1]; assumes size >= 1. */
double find_min(double arr[], int size) {
    double smallest = arr[0];
    int idx;
    for (idx = 1; idx < size; idx++) {
        if (arr[idx] < smallest)
            smallest = arr[idx];
    }
    return smallest;
}
/*
 * Driver: sorts a reverse-ordered SIZE-element list with task-parallel
 * merge sort and reports total runtime, algorithm runtime (max end - min
 * start across threads) and overhead.
 * Fixes vs. original:
 *  - 'tnum' was stored by every thread in the first parallel region (a data
 *    race); it is now written by exactly one thread via 'omp single'.
 *  - 'data' (4 MB) lived on the stack and could overflow the default stack;
 *    it is now in static storage. Output is unchanged.
 */
int main() {
    double start, end, max, min;
    int tnum;
    static int data[SIZE];  /* 4 MB: keep off the stack */
    #pragma omp parallel
    {
        #pragma omp single
        tnum = omp_get_num_threads();
    }
    double alg_start[tnum], alg_end[tnum];
    printf("No. of threads = %d\n", tnum);
    int i;
    for (i = 0; i < SIZE; i++) {
        data[i] = SIZE - i;  /* reverse-sorted input */
    }
    printf("List before sorting\n");
    print_list(data, SIZE);
    start = omp_get_wtime();
    #pragma omp parallel
    {
        alg_start[omp_get_thread_num()] = omp_get_wtime();
        /* one thread seeds the task tree; all threads execute tasks */
        #pragma omp single
        {
            merge_sort(data, 0, SIZE - 1);
        }
        alg_end[omp_get_thread_num()] = omp_get_wtime();
    }
    end = omp_get_wtime();
    printf("List after sorting\n");
    print_list(data, SIZE);
    max = find_max(alg_end, tnum);
    min = find_min(alg_start, tnum);
    printf("Stage 1 total runtime = %f\n", end - start);
    printf("Stage 1 alg runtime = %f\n", max - min);
    printf("Stage 1 overhead = %f\n", (end - start) - (max - min));
    return 0;
}
FeatureFinderAlgorithmPicked.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2013.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Oliver Kohlbacher, Stephan Aiche $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#ifndef OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H
#define OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H
#include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithm.h>
#include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/FeatureFinderAlgorithmPickedHelperStructs.h>
#include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/TraceFitter.h>
#include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/EGHTraceFitter.h>
#include <OpenMS/TRANSFORMATIONS/FEATUREFINDER/GaussTraceFitter.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/FeatureXMLFile.h>
#include <OpenMS/FORMAT/TextFile.h>
#include <OpenMS/CHEMISTRY/IsotopeDistribution.h>
#include <OpenMS/MATH/STATISTICS/StatisticFunctions.h>
#include <OpenMS/MATH/MISC/MathFunctions.h>
#include <OpenMS/CONCEPT/Constants.h>
#include <OpenMS/CHEMISTRY/Element.h>
#include <OpenMS/CHEMISTRY/ElementDB.h>
#include <OpenMS/CHEMISTRY/IsotopeDistribution.h>
#include <boost/math/special_functions/fpclassify.hpp>
#include <numeric>
#include <fstream>
#include <algorithm>
#include <QtCore/QDir>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
/**
@brief FeatureFinderAlgorithm for picked peaks.
@htmlinclude OpenMS_FeatureFinderAlgorithmPicked.parameters
@improvement RT model with tailing/fronting (Marc)
@improvement More general MZ model - e.g. based on co-elution or with sulfur-averagines (Marc)
@todo Fix output in parallel mode, change assignment of charges to threads, add parallel TOPP test (Marc)
@todo Implement user-specified seed lists support (Marc)
@ingroup FeatureFinder
*/
template <class PeakType, class FeatureType>
class FeatureFinderAlgorithmPicked :
public FeatureFinderAlgorithm<PeakType, FeatureType>,
public FeatureFinderDefs
{
public:
/// @name Type definitions
//@{
typedef typename FeatureFinderAlgorithm<PeakType, FeatureType>::MapType MapType;
typedef typename FeatureFinderAlgorithm<PeakType, FeatureType>::FeatureMapType FeatureMapType;
typedef typename MapType::SpectrumType SpectrumType;
typedef typename SpectrumType::FloatDataArrays FloatDataArrays;
//@}
using FeatureFinderAlgorithm<PeakType, FeatureType>::param_;
using FeatureFinderAlgorithm<PeakType, FeatureType>::features_;
using FeatureFinderAlgorithm<PeakType, FeatureType>::ff_;
using FeatureFinderAlgorithm<PeakType, FeatureType>::defaults_;
protected:
typedef FeatureFinderAlgorithmPickedHelperStructs::Seed Seed;
typedef typename FeatureFinderAlgorithmPickedHelperStructs::MassTrace<PeakType> MassTrace;
typedef typename FeatureFinderAlgorithmPickedHelperStructs::MassTraces<PeakType> MassTraces;
typedef FeatureFinderAlgorithmPickedHelperStructs::TheoreticalIsotopePattern TheoreticalIsotopePattern;
typedef FeatureFinderAlgorithmPickedHelperStructs::IsotopePattern IsotopePattern;
public:
/// Default constructor: registers every algorithm parameter in defaults_
/// (value, numeric limits, valid strings and help text, grouped into
/// sections) and finally copies them into param_ via defaultsToParam_().
FeatureFinderAlgorithmPicked() :
FeatureFinderAlgorithm<PeakType, FeatureType>(),
map_(),
log_()
{
// Debug settings (intermediate-result dumps; see also debug:pseudo_rt_shift below)
defaults_.setValue("debug", "false", "When debug mode is activated, several files with intermediate results are written to the folder 'debug' (do not use in parallel mode).");
defaults_.setValidStrings("debug", ListUtils::create<String>("true,false"));
// Intensity score settings
defaults_.setValue("intensity:bins", 10, "Number of bins per dimension (RT and m/z). The higher this value, the more local the intensity significance score is.\nThis parameter should be decreased, if the algorithm is used on small regions of a map.");
defaults_.setMinInt("intensity:bins", 1);
defaults_.setSectionDescription("intensity", "Settings for the calculation of a score indicating if a peak's intensity is significant in the local environment (between 0 and 1)");
// Mass trace search parameters
defaults_.setValue("mass_trace:mz_tolerance", 0.03, "Tolerated m/z deviation of peaks belonging to the same mass trace.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than that 1/charge_high!");
defaults_.setMinFloat("mass_trace:mz_tolerance", 0.0);
defaults_.setValue("mass_trace:min_spectra", 10, "Number of spectra that have to show a similar peak mass in a mass trace.");
defaults_.setMinInt("mass_trace:min_spectra", 1);
defaults_.setValue("mass_trace:max_missing", 1, "Number of consecutive spectra where a high mass deviation or missing peak is acceptable.\nThis parameter should be well below 'min_spectra'!");
defaults_.setMinInt("mass_trace:max_missing", 0);
defaults_.setValue("mass_trace:slope_bound", 0.1, "The maximum slope of mass trace intensities when extending from the highest peak.\nThis parameter is important to seperate overlapping elution peaks.\nIt should be increased if feature elution profiles fluctuate a lot.");
defaults_.setMinFloat("mass_trace:slope_bound", 0.0);
defaults_.setSectionDescription("mass_trace", "Settings for the calculation of a score indicating if a peak is part of a mass trace (between 0 and 1).");
// Isotopic pattern search parameters
defaults_.setValue("isotopic_pattern:charge_low", 1, "Lowest charge to search for.");
defaults_.setMinInt("isotopic_pattern:charge_low", 1);
defaults_.setValue("isotopic_pattern:charge_high", 4, "Highest charge to search for.");
defaults_.setMinInt("isotopic_pattern:charge_high", 1);
defaults_.setValue("isotopic_pattern:mz_tolerance", 0.03, "Tolerated m/z deviation from the theoretical isotopic pattern.\nIt should be larger than the m/z resolution of the instrument.\nThis value must be smaller than that 1/charge_high!");
defaults_.setMinFloat("isotopic_pattern:mz_tolerance", 0.0);
defaults_.setValue("isotopic_pattern:intensity_percentage", 10.0, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity must be present.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:intensity_percentage", 0.0);
defaults_.setMaxFloat("isotopic_pattern:intensity_percentage", 100.0);
defaults_.setValue("isotopic_pattern:intensity_percentage_optional", 0.1, "Isotopic peaks that contribute more than this percentage to the overall isotope pattern intensity can be missing.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:intensity_percentage_optional", 0.0);
defaults_.setMaxFloat("isotopic_pattern:intensity_percentage_optional", 100.0);
defaults_.setValue("isotopic_pattern:optional_fit_improvement", 2.0, "Minimal percental improvement of isotope fit to allow leaving out an optional peak.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:optional_fit_improvement", 0.0);
defaults_.setMaxFloat("isotopic_pattern:optional_fit_improvement", 100.0);
defaults_.setValue("isotopic_pattern:mass_window_width", 25.0, "Window width in Dalton for precalculation of estimated isotope distributions.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:mass_window_width", 1.0);
defaults_.setMaxFloat("isotopic_pattern:mass_window_width", 200.0);
// Non-default 12C/14N abundances trigger recomputed isotope distributions in run()
defaults_.setValue("isotopic_pattern:abundance_12C", 98.93, "Rel. abundance of the light carbon. Modify if labeled.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:abundance_12C", 0.0);
defaults_.setMaxFloat("isotopic_pattern:abundance_12C", 100.0);
defaults_.setValue("isotopic_pattern:abundance_14N", 99.632, "Rel. abundance of the light nitrogen. Modify if labeled.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("isotopic_pattern:abundance_14N", 0.0);
defaults_.setMaxFloat("isotopic_pattern:abundance_14N", 100.0);
defaults_.setSectionDescription("isotopic_pattern", "Settings for the calculation of a score indicating if a peak is part of a isotopic pattern (between 0 and 1).");
// Seed settings
defaults_.setValue("seed:min_score", 0.8, "Minimum seed score a peak has to reach to be used as seed.\nThe seed score is the geometric mean of intensity score, mass trace score and isotope pattern score.\nIf your features show a large deviation from the averagene isotope distribution or from an gaussian elution profile, lower this score.");
defaults_.setMinFloat("seed:min_score", 0.0);
defaults_.setMaxFloat("seed:min_score", 1.0);
defaults_.setSectionDescription("seed", "Settings that determine which peaks are considered a seed");
// Model fitting settings (consumed by the trace fitter in run())
defaults_.setValue("fit:epsilon_abs", 0.0001, "Absolute epsilon used for convergence of the fit.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("fit:epsilon_abs", 0.0);
defaults_.setValue("fit:epsilon_rel", 0.0001, "Relative epsilon used for convergence of the fit.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("fit:epsilon_rel", 0.0);
defaults_.setValue("fit:max_iterations", 500, "Maximum number of iterations of the fit.", ListUtils::create<String>("advanced"));
defaults_.setMinInt("fit:max_iterations", 1);
defaults_.setSectionDescription("fit", "Settings for the model fitting");
// Feature settings (quality thresholds and reporting)
defaults_.setValue("feature:min_score", 0.7, "Feature score threshold for a feature to be reported.\nThe feature score is the geometric mean of the average relative deviation and the correlation between the model and the observed peaks.");
defaults_.setMinFloat("feature:min_score", 0.0);
defaults_.setMaxFloat("feature:min_score", 1.0);
defaults_.setValue("feature:min_isotope_fit", 0.8, "Minimum isotope fit of the feature before model fitting.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("feature:min_isotope_fit", 0.0);
defaults_.setMaxFloat("feature:min_isotope_fit", 1.0);
defaults_.setValue("feature:min_trace_score", 0.5, "Trace score threshold.\nTraces below this threshold are removed after the model fitting.\nThis parameter is important for features that overlap in m/z dimension.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("feature:min_trace_score", 0.0);
defaults_.setMaxFloat("feature:min_trace_score", 1.0);
defaults_.setValue("feature:min_rt_span", 0.333, "Minimum RT span in relation to extended area that has to remain after model fitting.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("feature:min_rt_span", 0.0);
defaults_.setMaxFloat("feature:min_rt_span", 1.0);
defaults_.setValue("feature:max_rt_span", 2.5, "Maximum RT span in relation to extended area that the model is allowed to have.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("feature:max_rt_span", 0.5);
defaults_.setValue("feature:rt_shape", "symmetric", "Choose model used for RT profile fitting. If set to symmetric a gauss shape is used, in case of asymmetric an EGH shape is used.", ListUtils::create<String>("advanced"));
defaults_.setValidStrings("feature:rt_shape", ListUtils::create<String>("symmetric,asymmetric"));
defaults_.setValue("feature:max_intersection", 0.35, "Maximum allowed intersection of features.", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("feature:max_intersection", 0.0);
defaults_.setMaxFloat("feature:max_intersection", 1.0);
defaults_.setValue("feature:reported_mz", "monoisotopic", "The mass type that is reported for features.\n'maximum' returns the m/z value of the highest mass trace.\n'average' returns the intensity-weighted average m/z value of all contained peaks.\n'monoisotopic' returns the monoisotopic m/z value derived from the fitted isotope model.");
defaults_.setValidStrings("feature:reported_mz", ListUtils::create<String>("maximum,average,monoisotopic"));
defaults_.setSectionDescription("feature", "Settings for the features (intensity, quality assessment, ...)");
// User-specified seed settings (active when setSeeds() supplied a non-empty list)
defaults_.setValue("user-seed:rt_tolerance", 5.0, "Allowed RT deviation of seeds from the user-specified seed position.");
defaults_.setMinFloat("user-seed:rt_tolerance", 0.0);
defaults_.setValue("user-seed:mz_tolerance", 1.1, "Allowed m/z deviation of seeds from the user-specified seed position.");
defaults_.setMinFloat("user-seed:mz_tolerance", 0.0);
defaults_.setValue("user-seed:min_score", 0.5, "Overwrites 'seed:min_score' for user-specified seeds. The cutoff is typically a bit lower in this case.");
defaults_.setMinFloat("user-seed:min_score", 0.0);
defaults_.setMaxFloat("user-seed:min_score", 1.0);
defaults_.setSectionDescription("user-seed", "Settings for user-specified seeds.");
// Advanced debug settings
defaults_.setValue("debug:pseudo_rt_shift", 500.0, "Pseudo RT shift used when .", ListUtils::create<String>("advanced"));
defaults_.setMinFloat("debug:pseudo_rt_shift", 1.0);
this->defaultsToParam_();
}
// docu in base class
/// Stores a copy of the user-specified seed list; a non-empty list enables
/// user-seed mode in run(), where seeds_ is sorted by m/z before use.
virtual void setSeeds(const FeatureMapType& seeds)
{
seeds_ = seeds;
}
/// Main method for actual FeatureFinder
virtual void run()
{
//-------------------------------------------------------------------------
//General initialization
//---------------------------------------------------------------------------
//quality estimation
DoubleReal min_feature_score = param_.getValue("feature:min_score");
//charges to look at
SignedSize charge_low = (Int)param_.getValue("isotopic_pattern:charge_low");
SignedSize charge_high = (Int)param_.getValue("isotopic_pattern:charge_high");
//fitting settings
UInt max_iterations = param_.getValue("fit:max_iterations");
DoubleReal epsilon_abs = param_.getValue("fit:epsilon_abs");
DoubleReal epsilon_rel = param_.getValue("fit:epsilon_rel");
Size max_isotopes = 20;
// check if non-natural isotopic abundances are set. If so modify
DoubleReal abundance_12C = param_.getValue("isotopic_pattern:abundance_12C");
DoubleReal abundance_14N = param_.getValue("isotopic_pattern:abundance_14N");
const Element* carbon_const = ElementDB::getInstance()->getElement("Carbon");
Element* carbon = const_cast<Element*>(carbon_const);
if (param_.getValue("isotopic_pattern:abundance_12C") != defaults_.getValue("isotopic_pattern:abundance_12C"))
{
max_isotopes += 1000;
IsotopeDistribution isotopes;
std::vector<std::pair<Size, double> > container;
container.push_back(std::make_pair(12, abundance_12C / 100.0));
container.push_back(std::make_pair(13, 1.0 - (abundance_12C / 100.0)));
isotopes.set(container);
carbon->setIsotopeDistribution(isotopes);
}
const Element* nitrogen_const = ElementDB::getInstance()->getElement("Nitrogen");
Element* nitrogen = const_cast<Element*>(nitrogen_const);
if (param_.getValue("isotopic_pattern:abundance_14N") != defaults_.getValue("isotopic_pattern:abundance_14N"))
{
max_isotopes += 1000;
IsotopeDistribution isotopes;
std::vector<std::pair<Size, double> > container;
container.push_back(std::make_pair(14, abundance_14N / 100.0));
container.push_back(std::make_pair(15, 1.0 - (abundance_14N / 100.0)));
isotopes.set(container);
nitrogen->setIsotopeDistribution(isotopes);
}
// initialize trace fitter parameters here to avoid
// bug https://sourceforge.net/apps/trac/open-ms/ticket/147
Param trace_fitter_params;
trace_fitter_params.setValue("max_iteration", max_iterations);
trace_fitter_params.setValue("epsilon_abs", epsilon_abs);
trace_fitter_params.setValue("epsilon_rel", epsilon_rel);
//copy the input map
map_ = *(FeatureFinderAlgorithm<PeakType, FeatureType>::map_);
//flag for user-specified seed mode
bool user_seeds = (seeds_.size() > 0);
if (user_seeds)
{
seeds_.sortByMZ();
}
DoubleReal user_rt_tol = param_.getValue("user-seed:rt_tolerance");
DoubleReal user_mz_tol = param_.getValue("user-seed:mz_tolerance");
DoubleReal user_seed_score = param_.getValue("user-seed:min_score");
//reserve space for calculated scores
UInt charge_count = charge_high - charge_low + 1;
for (Size s = 0; s < map_.size(); ++s)
{
Size scan_size = map_[s].size();
map_[s].getFloatDataArrays().resize(3 + 2 * charge_count);
map_[s].getFloatDataArrays()[0].setName("trace_score");
map_[s].getFloatDataArrays()[0].assign(scan_size, 0.0);
map_[s].getFloatDataArrays()[1].setName("intensity_score");
map_[s].getFloatDataArrays()[1].assign(scan_size, 0.0);
map_[s].getFloatDataArrays()[2].setName("local_max");
map_[s].getFloatDataArrays()[2].assign(scan_size, 0.0);
//create isotope pattern score arrays
UInt charge = charge_low;
for (Size i = 3; i < 3 + charge_count; ++i)
{
map_[s].getFloatDataArrays()[i].setName(String("pattern_score_") + charge);
map_[s].getFloatDataArrays()[i].assign(scan_size, 0.0);
++charge;
}
//create overall score arrays
charge = charge_low;
for (Size i = 3 + charge_count; i < 3 + 2 * charge_count; ++i)
{
map_[s].getFloatDataArrays()[i].setName(String("overall_score_") + charge);
map_[s].getFloatDataArrays()[i].assign(scan_size, 0.0);
++charge;
}
}
int gl_progress = 0;
debug_ = ((String)(param_.getValue("debug")) == "true");
//clean up / create folders for debug information
if (debug_)
{
QDir dir(".");
dir.mkpath("debug/features");
log_.open("debug/log.txt");
}
//---------------------------------------------------------------------------
//Step 1:
//Precalculate intensity scores for peaks
//---------------------------------------------------------------------------
if (debug_) log_ << "Precalculating intensity thresholds ..." << std::endl;
//new scope to make local variables disappear
{
ff_->startProgress(0, intensity_bins_ * intensity_bins_, "Precalculating intensity scores");
DoubleReal rt_start = map_.getMinRT();
DoubleReal mz_start = map_.getMinMZ();
intensity_rt_step_ = (map_.getMaxRT() - rt_start) / (DoubleReal)intensity_bins_;
intensity_mz_step_ = (map_.getMaxMZ() - mz_start) / (DoubleReal)intensity_bins_;
intensity_thresholds_.resize(intensity_bins_);
for (Size rt = 0; rt < intensity_bins_; ++rt)
{
intensity_thresholds_[rt].resize(intensity_bins_);
DoubleReal min_rt = rt_start + rt * intensity_rt_step_;
DoubleReal max_rt = rt_start + (rt + 1) * intensity_rt_step_;
std::vector<DoubleReal> tmp;
for (Size mz = 0; mz < intensity_bins_; ++mz)
{
ff_->setProgress(rt * intensity_bins_ + mz);
DoubleReal min_mz = mz_start + mz * intensity_mz_step_;
DoubleReal max_mz = mz_start + (mz + 1) * intensity_mz_step_;
//std::cout << "rt range: " << min_rt << " - " << max_rt << std::endl;
//std::cout << "mz range: " << min_mz << " - " << max_mz << std::endl;
tmp.clear();
for (typename MapType::ConstAreaIterator it = map_.areaBeginConst(min_rt, max_rt, min_mz, max_mz); it != map_.areaEndConst(); ++it)
{
tmp.push_back(it->getIntensity());
}
//init vector
intensity_thresholds_[rt][mz].assign(21, 0.0);
//store quantiles (20)
if (!tmp.empty())
{
std::sort(tmp.begin(), tmp.end());
for (Size i = 0; i < 21; ++i)
{
Size index = (Size) std::floor(0.05 * i * (tmp.size() - 1));
intensity_thresholds_[rt][mz][i] = tmp[index];
}
}
}
}
//store intensity score in PeakInfo
for (Size s = 0; s < map_.size(); ++s)
{
for (Size p = 0; p < map_[s].size(); ++p)
{
map_[s].getFloatDataArrays()[1][p] = intensityScore_(s, p);
}
}
ff_->endProgress();
}
//---------------------------------------------------------------------------
//Step 2:
//Precalculate mass trace scores and local trace maximum for each peak
//---------------------------------------------------------------------------
//new scope to make local variables disappear
{
Size end_iteration = map_.size() - std::min((Size) min_spectra_, map_.size());
ff_->startProgress(min_spectra_, end_iteration, "Precalculating mass trace scores");
// skip first and last scans since we cannot extend the mass traces there
for (Size s = min_spectra_; s < end_iteration; ++s)
{
ff_->setProgress(s);
const SpectrumType& spectrum = map_[s];
//iterate over all peaks of the scan
for (Size p = 0; p < spectrum.size(); ++p)
{
std::vector<DoubleReal> scores;
scores.reserve(2 * min_spectra_);
DoubleReal pos = spectrum[p].getMZ();
Real inte = spectrum[p].getIntensity();
//if(debug_) log_ << std::endl << "Peak: " << pos << std::endl;
bool is_max_peak = true; //checking the maximum intensity peaks -> use them later as feature seeds.
for (Size i = 1; i <= min_spectra_; ++i)
{
try
{
Size spec_index = map_[s + i].findNearest(pos);
DoubleReal position_score = positionScore_(pos, map_[s + i][spec_index].getMZ(), trace_tolerance_);
if (position_score > 0 && map_[s + i][spec_index].getIntensity() > inte) is_max_peak = false;
scores.push_back(position_score);
}
catch (...) //no peaks in the spectrum
{
scores.push_back(0.0);
}
}
for (Size i = 1; i <= min_spectra_; ++i)
{
try
{
Size spec_index = map_[s - i].findNearest(pos);
DoubleReal position_score = positionScore_(pos, map_[s - i][spec_index].getMZ(), trace_tolerance_);
if (position_score > 0 && map_[s - i][spec_index].getIntensity() > inte) is_max_peak = false;
scores.push_back(position_score);
}
catch (...) //no peaks in the spectrum
{
scores.push_back(0.0);
}
}
//Calculate a consensus score out of the scores calculated before
DoubleReal trace_score = std::accumulate(scores.begin(), scores.end(), 0.0) / scores.size();
//store final score for later use
map_[s].getFloatDataArrays()[0][p] = trace_score;
map_[s].getFloatDataArrays()[2][p] = is_max_peak;
}
}
ff_->endProgress();
}
//---------------------------------------------------------------------------
//Step 2.5:
//Precalculate isotope distributions for interesting mass ranges
//---------------------------------------------------------------------------
//new scope to make local variables disappear
{
DoubleReal max_mass = map_.getMaxMZ() * charge_high;
Size num_isotopes = std::ceil(max_mass / mass_window_width_) + 1;
ff_->startProgress(0, num_isotopes, "Precalculating isotope distributions");
//reserve enough space
isotope_distributions_.resize(num_isotopes);
//calculate distribution if necessary
for (Size index = 0; index < num_isotopes; ++index)
{
//if(debug_) log_ << "Calculating iso dist for mass: " << 0.5*mass_window_width_ + index * mass_window_width_ << std::endl;
IsotopeDistribution d;
d.setMaxIsotope(max_isotopes);
d.estimateFromPeptideWeight(0.5 * mass_window_width_ + index * mass_window_width_);
//trim left and right. And store the number of isotopes on the left, to reconstruct the monoisotopic peak
Size size_before = d.size();
d.trimLeft(intensity_percentage_optional_);
isotope_distributions_[index].trimmed_left = size_before - d.size();
d.trimRight(intensity_percentage_optional_);
for (IsotopeDistribution::Iterator it = d.begin(); it != d.end(); ++it)
{
isotope_distributions_[index].intensity.push_back(it->second);
//if(debug_) log_ << " - " << it->second << std::endl;
}
//determine the number of optional peaks at the beginning/end
Size begin = 0;
Size end = 0;
bool is_begin = true;
bool is_end = false;
for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
{
if (isotope_distributions_[index].intensity[i] < intensity_percentage_)
{
if (!is_end && !is_begin) is_end = true;
if (is_begin) ++begin;
else if (is_end) ++end;
}
else if (is_begin)
{
is_begin = false;
}
}
isotope_distributions_[index].optional_begin = begin;
isotope_distributions_[index].optional_end = end;
//scale the distribution to a maximum of 1
DoubleReal max = 0.0;
for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
{
if (isotope_distributions_[index].intensity[i] > max)
{
max = isotope_distributions_[index].intensity[i];
}
}
isotope_distributions_[index].max = max;
for (Size i = 0; i < isotope_distributions_[index].intensity.size(); ++i)
{
isotope_distributions_[index].intensity[i] /= max;
}
//if(debug_) log_ << " - optional begin/end:" << begin << " / " << end << std::endl;
}
ff_->endProgress();
}
//-------------------------------------------------------------------------
//Step 3:
//Charge loop (create seeds and features for each charge separately)
//-------------------------------------------------------------------------
Int plot_nr_global = -1; //counter for the number of plots (debug info)
Int feature_nr_global = 0; //counter for the number of features (debug info)
for (SignedSize c = charge_low; c <= charge_high; ++c)
{
UInt meta_index_isotope = 3 + c - charge_low;
UInt meta_index_overall = 3 + charge_count + c - charge_low;
Size feature_candidates = 0;
std::vector<Seed> seeds;
//-----------------------------------------------------------
//Step 3.1: Precalculate IsotopePattern score
//-----------------------------------------------------------
ff_->startProgress(0, map_.size(), String("Calculating isotope pattern scores for charge ") + String(c));
for (Size s = 0; s < map_.size(); ++s)
{
ff_->setProgress(s);
const SpectrumType& spectrum = map_[s];
for (Size p = 0; p < spectrum.size(); ++p)
{
DoubleReal mz = spectrum[p].getMZ();
//get isotope distribution for this mass
const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(mz * c);
//determine highest peak in isotope distribution
Size max_isotope = std::max_element(isotopes.intensity.begin(), isotopes.intensity.end()) - isotopes.intensity.begin();
//Look up expected isotopic peaks (in the current spectrum or adjacent spectra)
Size peak_index = spectrum.findNearest(mz - ((DoubleReal)(isotopes.size() + 1) / c));
IsotopePattern pattern(isotopes.size());
for (Size i = 0; i < isotopes.size(); ++i)
{
DoubleReal isotope_pos = mz + ((DoubleReal)i - max_isotope) / c;
findIsotope_(isotope_pos, s, pattern, i, peak_index);
}
DoubleReal pattern_score = isotopeScore_(isotopes, pattern, true);
//update pattern scores of all contained peaks (if necessary)
if (pattern_score > 0.0)
{
for (Size i = 0; i < pattern.peak.size(); ++i)
{
if (pattern.peak[i] >= 0 && pattern_score > map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]])
{
map_[pattern.spectrum[i]].getFloatDataArrays()[meta_index_isotope][pattern.peak[i]] = pattern_score;
}
}
}
}
}
ff_->endProgress();
//-----------------------------------------------------------
//Step 3.2:
//Find seeds for this charge
//-----------------------------------------------------------
Size end_of_iteration = map_.size() - std::min((Size) min_spectra_, map_.size());
ff_->startProgress(min_spectra_, end_of_iteration, String("Finding seeds for charge ") + String(c));
DoubleReal min_seed_score = param_.getValue("seed:min_score");
//do nothing for the first few and last few spectra as the scans required to search for traces are missing
for (Size s = min_spectra_; s < end_of_iteration; ++s)
{
ff_->setProgress(s);
//iterate over peaks
for (Size p = 0; p < map_[s].size(); ++p)
{
FloatDataArrays& meta = map_[s].getFloatDataArrays();
DoubleReal overall_score = std::pow(meta[0][p] * meta[1][p] * meta[meta_index_isotope][p], 1.0f / 3.0f);
meta[meta_index_overall][p] = overall_score;
//add seed to vector if certain conditions are fulfilled
if (meta[2][p] != 0.0) // local maximum of mass trace is prerequisite for all features
{
//automatic seeds: overall score greater than the min seed score
if (!user_seeds && overall_score >= min_seed_score)
{
Seed seed;
seed.spectrum = s;
seed.peak = p;
seed.intensity = map_[s][p].getIntensity();
seeds.push_back(seed);
}
//user-specified seeds: overall score greater than USER min seed score
else if (user_seeds && overall_score >= user_seed_score)
{
//only consider seeds, if they are near a user-specified seed
FeatureType tmp;
tmp.setMZ(map_[s][p].getMZ() - user_mz_tol);
for (typename FeatureMapType::const_iterator it = std::lower_bound(seeds_.begin(), seeds_.end(), tmp, typename FeatureType::MZLess()); it < seeds_.end(); ++it)
{
if (it->getMZ() > map_[s][p].getMZ() + user_mz_tol)
{
break;
}
if (fabs(it->getMZ() - map_[s][p].getMZ()) < user_mz_tol &&
fabs(it->getRT() - map_[s].getRT()) < user_rt_tol)
{
Seed seed;
seed.spectrum = s;
seed.peak = p;
seed.intensity = map_[s][p].getIntensity();
seeds.push_back(seed);
break;
}
}
}
}
}
}
//sort seeds according to intensity
std::sort(seeds.rbegin(), seeds.rend());
//create and store seeds map and selected peak map
if (debug_)
{
//seeds
FeatureMap<> seed_map;
seed_map.reserve(seeds.size());
for (Size i = 0; i < seeds.size(); ++i)
{
Size spectrum = seeds[i].spectrum;
Size peak = seeds[i].peak;
const FloatDataArrays& meta = map_[spectrum].getFloatDataArrays();
Feature tmp;
tmp.setIntensity(seeds[i].intensity);
tmp.setOverallQuality(meta[meta_index_overall][peak]);
tmp.setRT(map_[spectrum].getRT());
tmp.setMZ(map_[spectrum][peak].getMZ());
tmp.setMetaValue("intensity_score", meta[1][peak]);
tmp.setMetaValue("pattern_score", meta[meta_index_isotope][peak]);
tmp.setMetaValue("trace_score", meta[0][peak]);
seed_map.push_back(tmp);
}
FeatureXMLFile().store(String("debug/seeds_") + String(c) + ".featureXML", seed_map);
}
ff_->endProgress();
std::cout << "Found " << seeds.size() << " seeds for charge " << c << "." << std::endl;
//------------------------------------------------------------------
//Step 3.3:
//Extension of seeds
//------------------------------------------------------------------
// We do not want to store features whose seeds lie within other
// features with higher intensity. We thus store this information in
// the map seeds_in_features which contains for each seed i a vector
// of other seeds that are contained in the corresponding feature i.
//
// The features are stored in an temporary feature map until it is
// decided whether they are contained within a seed of higher
// intensity.
std::map<Size, std::vector<Size> > seeds_in_features;
typedef std::map<Size, FeatureType> FeatureMapType;
FeatureMapType tmp_feature_map;
gl_progress = 0;
ff_->startProgress(0, seeds.size(), String("Extending seeds for charge ") + String(c));
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize i = 0; i < (SignedSize)seeds.size(); ++i)
{
//------------------------------------------------------------------
//Step 3.3.1:
//Extend all mass traces
//------------------------------------------------------------------
const SpectrumType& spectrum = map_[seeds[i].spectrum];
const PeakType& peak = spectrum[seeds[i].peak];
IF_MASTERTHREAD
{
ff_->setProgress(gl_progress++);
if (debug_)
{
log_ << std::endl << "Seed " << i << ":" << std::endl;
//If the intensity is zero this seed is already uses in another feature
log_ << " - Int: " << peak.getIntensity() << std::endl;
log_ << " - RT: " << spectrum.getRT() << std::endl;
log_ << " - MZ: " << peak.getMZ() << std::endl;
}
}
//----------------------------------------------------------------
//Find best fitting isotope pattern for this charge (using averagine)
IsotopePattern best_pattern(0);
DoubleReal isotope_fit_quality = findBestIsotopeFit_(seeds[i], c, best_pattern);
if (isotope_fit_quality < min_isotope_fit_)
{
abort_(seeds[i], "Could not find good enough isotope pattern containing the seed");
//continue;
}
else
{
//extend the convex hull in RT dimension (starting from the trace peaks)
MassTraces traces;
traces.reserve(best_pattern.peak.size());
extendMassTraces_(best_pattern, traces, meta_index_overall);
//check if the traces are still valid
DoubleReal seed_mz = map_[seeds[i].spectrum][seeds[i].peak].getMZ();
if (!traces.isValid(seed_mz, trace_tolerance_))
{
abort_(seeds[i], "Could not extend seed");
//continue;
}
else
{
//------------------------------------------------------------------
//Step 3.3.2:
//Gauss/EGH fit (first fit to find the feature boundaries)
//------------------------------------------------------------------
Int plot_nr = -1;
#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_PLOTNR)
#endif
{
plot_nr = ++plot_nr_global;
}
//------------------------------------------------------------------
//TODO try fit with baseline term once more
//baseline estimate
traces.updateBaseline();
traces.baseline = 0.75 * traces.baseline;
traces[traces.max_trace].updateMaximum();
// choose fitter
double egh_tau = 0.0;
TraceFitter<PeakType>* fitter = chooseTraceFitter_(egh_tau);
fitter->setParameters(trace_fitter_params);
fitter->fit(traces);
#if 0
TraceFitter<PeakType>* alt_fitter = new GaussTraceFitter<PeakType>();
Param alt_p;
alt_p.setValue("max_iteration", max_iterations);
alt_p.setValue("epsilon_abs", epsilon_abs);
alt_p.setValue("epsilon_rel", epsilon_rel);
alt_fitter->setParameters(alt_p);
alt_fitter->fit(traces);
LOG_DEBUG << "EGH: " << fitter->getCenter() << " " << fitter->getHeight() << std::endl;
LOG_DEBUG << "GAUSS: " << alt_fitter->getCenter() << " " << alt_fitter->getHeight() << std::endl;
#endif
// what should come out
// left "sigma"
// right "sigma"
// x0 .. "center" position of RT fit
// height .. "height" of RT fit
//------------------------------------------------------------------
//------------------------------------------------------------------
//Step 3.3.3:
//Crop feature according to RT fit (2.5*sigma) and remove badly fitting traces
//------------------------------------------------------------------
MassTraces new_traces;
cropFeature_(fitter, traces, new_traces);
//------------------------------------------------------------------
//Step 3.3.4:
//Check if feature is ok
//------------------------------------------------------------------
String error_msg = "";
DoubleReal fit_score = 0.0;
DoubleReal correlation = 0.0;
DoubleReal final_score = 0.0;
bool feature_ok = checkFeatureQuality_(fitter, new_traces, seed_mz, min_feature_score, error_msg, fit_score, correlation, final_score);
#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_DEBUG)
#endif
{
//write debug output of feature
if (debug_)
{
writeFeatureDebugInfo_(fitter, traces, new_traces, feature_ok, error_msg, final_score, plot_nr, peak);
}
}
traces = new_traces;
//validity output
if (!feature_ok)
{
abort_(seeds[i], error_msg);
//continue;
}
else
{
//------------------------------------------------------------------
//Step 3.3.5:
//Feature creation
//------------------------------------------------------------------
Feature f;
//set label
f.setMetaValue(3, plot_nr);
f.setCharge(c);
f.setOverallQuality(final_score);
f.setMetaValue("score_fit", fit_score);
f.setMetaValue("score_correlation", correlation);
f.setRT(fitter->getCenter());
f.setWidth(fitter->getFWHM());
// Extract some of the model parameters.
if (egh_tau != 0.0)
{
egh_tau = (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getTau();
f.setMetaValue("EGH_tau", egh_tau);
f.setMetaValue("EGH_height", (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getHeight());
f.setMetaValue("EGH_sigma", (static_cast<EGHTraceFitter<PeakType>*>(fitter))->getSigma());
}
// Calculate the mass of the feature: maximum, average, monoisotopic
if (reported_mz_ == "maximum")
{
f.setMZ(traces[traces.getTheoreticalmaxPosition()].getAvgMZ());
}
else if (reported_mz_ == "average")
{
DoubleReal total_intensity = 0.0;
DoubleReal average_mz = 0.0;
for (Size t = 0; t < traces.size(); ++t)
{
for (Size p = 0; p < traces[t].peaks.size(); ++p)
{
average_mz += traces[t].peaks[p].second->getMZ() * traces[t].peaks[p].second->getIntensity();
total_intensity += traces[t].peaks[p].second->getIntensity();
}
}
average_mz /= total_intensity;
f.setMZ(average_mz);
}
else if (reported_mz_ == "monoisotopic")
{
DoubleReal mono_mz = traces[traces.getTheoreticalmaxPosition()].getAvgMZ();
mono_mz -= (Constants::PROTON_MASS_U / c) * (traces.getTheoreticalmaxPosition() + best_pattern.theoretical_pattern.trimmed_left);
f.setMZ(mono_mz);
}
// Calculate intensity based on model only
// - the model does not include the baseline, so we ignore it here
// - as we scaled the isotope distribution to
f.setIntensity(fitter->getArea() / getIsotopeDistribution_(f.getMZ()).max);
// we do not need the fitter anymore
delete fitter;
//add convex hulls of mass traces
for (Size j = 0; j < traces.size(); ++j)
{
f.getConvexHulls().push_back(traces[j].getConvexhull());
}
#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_TMPFEATUREMAP)
#endif
{
tmp_feature_map[i] = f;
}
//----------------------------------------------------------------
//Remember all seeds that lie inside the convex hull of the new feature
DBoundingBox<2> bb = f.getConvexHull().getBoundingBox();
for (Size j = i + 1; j < seeds.size(); ++j)
{
DoubleReal rt = map_[seeds[j].spectrum].getRT();
DoubleReal mz = map_[seeds[j].spectrum][seeds[j].peak].getMZ();
if (bb.encloses(rt, mz) && f.encloses(rt, mz))
{
#ifdef _OPENMP
#pragma omp critical (FeatureFinderAlgorithmPicked_SEEDSINFEATURES)
#endif
{
seeds_in_features[i].push_back(j);
}
}
}
}
}
} // three if/else statements instead of continue (disallowed in OpenMP)
} // end of OPENMP over seeds
// Here we have to evaluate which seeds are already contained in
// features of seeds with higher intensities. Only if the seed is not
// used in any feature with higher intensity, we can add it to the
// features_ list.
std::vector<Size> seeds_contained;
for (typename std::map<Size, FeatureType>::iterator iter = tmp_feature_map.begin(); iter != tmp_feature_map.end(); ++iter)
{
Size seed_nr = iter->first;
bool is_used = false;
for (Size i = 0; i < seeds_contained.size(); ++i)
{
if (seed_nr == seeds_contained[i]) { is_used = true; break; }
}
if (!is_used)
{
++feature_candidates;
//re-set label
iter->second.setMetaValue(3, feature_nr_global);
++feature_nr_global;
features_->push_back(iter->second);
std::vector<Size> curr_seed = seeds_in_features[seed_nr];
for (Size k = 0; k < curr_seed.size(); ++k)
{
seeds_contained.push_back(curr_seed[k]);
}
}
}
IF_MASTERTHREAD ff_->endProgress();
std::cout << "Found " << feature_candidates << " feature candidates for charge " << c << "." << std::endl;
}
// END OPENMP
//------------------------------------------------------------------
//Step 4:
//Resolve contradicting and overlapping features
//------------------------------------------------------------------
ff_->startProgress(0, features_->size() * features_->size(), "Resolving overlapping features");
if (debug_) log_ << "Resolving intersecting features (" << features_->size() << " candidates)" << std::endl;
//sort features according to m/z in order to speed up the resolution
features_->sortByMZ();
//precalculate BBs and maximum mz span
std::vector<DBoundingBox<2> > bbs(features_->size());
DoubleReal max_mz_span = 0.0;
for (Size i = 0; i < features_->size(); ++i)
{
bbs[i] = (*features_)[i].getConvexHull().getBoundingBox();
if (bbs[i].height() > max_mz_span)
{
max_mz_span = bbs[i].height();
}
}
Size removed(0);
//intersect
for (Size i = 0; i < features_->size(); ++i)
{
Feature& f1((*features_)[i]);
for (Size j = i + 1; j < features_->size(); ++j)
{
ff_->setProgress(i * features_->size() + j);
Feature& f2((*features_)[j]);
//features that are more than 2 times the maximum m/z span apart do not overlap => abort
if (f2.getMZ() - f1.getMZ() > 2.0 * max_mz_span) break;
//do nothing if one of the features is already removed
if (f1.getIntensity() == 0.0 || f2.getIntensity() == 0.0) continue;
//do nothing if the overall convex hulls do not overlap
if (!bbs[i].intersects(bbs[j])) continue;
//act depending on the intersection
DoubleReal intersection = intersection_(f1, f2);
if (intersection >= max_feature_intersection_)
{
++removed;
if (debug_) log_ << " - Intersection (" << (i + 1) << "/" << (j + 1) << "): " << intersection << std::endl;
if (f1.getCharge() == f2.getCharge())
{
if (f1.getIntensity() * f1.getOverallQuality() > f2.getIntensity() * f2.getOverallQuality())
{
if (debug_) log_ << " - same charge -> removing duplicate " << (j + 1) << std::endl;
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (debug_) log_ << " - same charge -> removing duplicate " << (i + 1) << std::endl;
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
}
else if (f2.getCharge() % f1.getCharge() == 0)
{
if (debug_) log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << std::endl;
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
else if (f1.getCharge() % f2.getCharge() == 0)
{
if (debug_) log_ << " - different charge (one is the multiple of the other) -> removing lower charge " << (i + 1) << std::endl;
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (f1.getOverallQuality() > f2.getOverallQuality())
{
if (debug_) log_ << " - different charge -> removing lower score " << (j + 1) << std::endl;
f1.getSubordinates().push_back(f2);
f2.setIntensity(0.0);
}
else
{
if (debug_) log_ << " - different charge -> removing lower score " << (i + 1) << std::endl;
f2.getSubordinates().push_back(f1);
f1.setIntensity(0.0);
}
}
}
}
}
LOG_INFO << "Removed " << removed << " overlapping features." << std::endl;
//finally remove features with intensity 0
FeatureMap<> tmp;
tmp.reserve(features_->size());
for (Size i = 0; i < features_->size(); ++i)
{
if (features_->operator[](i).getIntensity() != 0.0)
{
tmp.push_back(features_->operator[](i));
}
}
tmp.swapFeaturesOnly(*features_);
//sort features by intensity
features_->sortByIntensity(true);
ff_->endProgress();
std::cout << features_->size() << " features left." << std::endl;
//Abort reasons
std::cout << std::endl;
std::cout << "Abort reasons during feature construction:" << std::endl;
for (std::map<String, UInt>::const_iterator it = aborts_.begin(); it != aborts_.end(); ++it)
{
std::cout << "- " << it->first << ": " << it->second << std::endl;
}
if (debug_)
{
//store map of abort reasons for failed seeds
FeatureMap<> abort_map;
abort_map.reserve(abort_reasons_.size());
Size counter = 0;
for (typename std::map<Seed, String>::iterator it2 = abort_reasons_.begin(); it2 != abort_reasons_.end(); ++it2, ++counter)
{
Feature f;
f.setRT(map_[it2->first.spectrum].getRT());
f.setMZ(map_[it2->first.spectrum][it2->first.peak].getMZ());
f.setIntensity(map_[it2->first.spectrum][it2->first.peak].getIntensity());
f.setMetaValue("label", it2->second);
f.setUniqueId(counter); // ID = index
abort_map.push_back(f);
}
abort_map.setUniqueId();
FeatureXMLFile().store("debug/abort_reasons.featureXML", abort_map);
//store input map with calculated scores (without overall score)
for (Size s = 0; s < map_.size(); ++s)
{
map_[s].getFloatDataArrays().erase(map_[s].getFloatDataArrays().begin() + 2);
}
MzMLFile().store("debug/input.mzML", map_);
}
}
/// Factory method: returns a newly allocated instance of this algorithm (caller takes ownership)
static FeatureFinderAlgorithm<PeakType, FeatureType>* create()
{
  FeatureFinderAlgorithmPicked* instance = new FeatureFinderAlgorithmPicked();
  return instance;
}
/// Returns the name under which this algorithm is registered in the factory
static const String getProductName()
{
  String product_name("centroided");
  return product_name;
}
protected:
/// editable copy of the map
MapType map_;
/// Output stream for log/debug info
mutable std::ofstream log_;
/// debug flag
bool debug_;
/// Occurrence count per abort reason (reason text -> number of aborted seeds)
std::map<String, UInt> aborts_;
/// Abort reason recorded per seed (only filled in debug mode, see abort_())
std::map<Seed, String> abort_reasons_;
/// User-specified seed list
FeatureMapType seeds_;
/// @name Members for parameters often needed in methods
//@{
DoubleReal pattern_tolerance_; ///< Stores mass_trace:mz_tolerance
DoubleReal trace_tolerance_; ///< Stores isotopic_pattern:mz_tolerance
UInt min_spectra_; ///< Number of spectra that have to show the same mass (for finding a mass trace)
UInt max_missing_trace_peaks_; ///< Stores mass_trace:max_missing
DoubleReal slope_bound_; ///< Max slope of mass trace intensities
DoubleReal intensity_percentage_; ///< Isotope pattern intensity contribution of required peaks
DoubleReal intensity_percentage_optional_; ///< Isotope pattern intensity contribution of optional peaks
DoubleReal optional_fit_improvement_; ///< Minimal improvement for leaving out optional isotope
DoubleReal mass_window_width_; ///< Width of the isotope pattern mass bins
UInt intensity_bins_; ///< Number of bins (in RT and MZ) for intensity significance estimation
DoubleReal min_isotope_fit_; ///< Minimum isotope pattern fit for a feature
DoubleReal min_trace_score_; ///< Minimum quality of a trace (stores feature:min_trace_score)
DoubleReal min_rt_span_; ///< Minimum RT range that has to be left after the fit
DoubleReal max_rt_span_; ///< Maximum RT range the model is allowed to span
DoubleReal max_feature_intersection_; ///< Maximum allowed feature intersection (if larger, one of the features is removed)
String reported_mz_; ///< The mass type that is reported for features. 'maximum' returns the m/z value of the highest mass trace. 'average' returns the intensity-weighted average m/z value of all contained peaks. 'monoisotopic' returns the monoisotopic m/z value derived from the fitted isotope model.
//@}
/// @name Members for intensity significance estimation
//@{
/// RT bin width
DoubleReal intensity_rt_step_;
/// m/z bin width
DoubleReal intensity_mz_step_;
/// Precalculated intensity 20-quantiles (binned)
std::vector<std::vector<std::vector<DoubleReal> > > intensity_thresholds_;
//@}
///Vector of precalculated isotope distributions for several mass windows
std::vector<TheoreticalIsotopePattern> isotope_distributions_;
// Docu in base class
// Caches frequently used parameter values in member variables so that the hot
// loops above do not have to go through Param lookups.
virtual void updateMembers_()
{
pattern_tolerance_ = param_.getValue("mass_trace:mz_tolerance");
trace_tolerance_ = param_.getValue("isotopic_pattern:mz_tolerance");
//min_spectra_ holds HALF of mass_trace:min_spectra (rounded down) — traces are
//extended by this many spectra in each RT direction
min_spectra_ = (UInt) std::floor((DoubleReal)param_.getValue("mass_trace:min_spectra") * 0.5);
max_missing_trace_peaks_ = param_.getValue("mass_trace:max_missing");
slope_bound_ = param_.getValue("mass_trace:slope_bound");
//percentage parameters are entered in [0,100] and stored as fractions in [0,1]
intensity_percentage_ = (DoubleReal)param_.getValue("isotopic_pattern:intensity_percentage") / 100.0;
intensity_percentage_optional_ = (DoubleReal)param_.getValue("isotopic_pattern:intensity_percentage_optional") / 100.0;
optional_fit_improvement_ = (DoubleReal)param_.getValue("isotopic_pattern:optional_fit_improvement") / 100.0;
mass_window_width_ = param_.getValue("isotopic_pattern:mass_window_width");
intensity_bins_ = param_.getValue("intensity:bins");
min_isotope_fit_ = param_.getValue("feature:min_isotope_fit");
min_trace_score_ = param_.getValue("feature:min_trace_score");
min_rt_span_ = param_.getValue("feature:min_rt_span");
max_rt_span_ = param_.getValue("feature:max_rt_span");
max_feature_intersection_ = param_.getValue("feature:max_intersection");
reported_mz_ = param_.getValue("feature:reported_mz");
}
/// Logs why a seed was discarded and tallies how often each abort reason occurs
void abort_(const Seed& seed, const String& reason)
{
  if (debug_)
  {
    log_ << "Abort: " << reason << std::endl;
  }
  ++aborts_[reason];
  //in debug mode additionally remember the reason per seed (dumped later as featureXML)
  if (debug_)
  {
    abort_reasons_[seed] = reason;
  }
}
/**
 * Computes the RT intersection between two features.
 * The overlap is normalized by the RT span of the smaller feature, so the result lies in [0,1].
 */
DoubleReal intersection_(const Feature& f1, const Feature& f2) const
{
  const std::vector<ConvexHull2D>& hulls1 = f1.getConvexHulls();
  const std::vector<ConvexHull2D>& hulls2 = f2.getConvexHulls();
  //sum up the RT widths of all mass-trace hulls of feature 1
  DoubleReal span1 = 0.0;
  for (Size a = 0; a < hulls1.size(); ++a)
  {
    span1 += hulls1[a].getBoundingBox().width();
  }
  //sum up the RT widths of all mass-trace hulls of feature 2
  DoubleReal span2 = 0.0;
  for (Size b = 0; b < hulls2.size(); ++b)
  {
    span2 += hulls2[b].getBoundingBox().width();
  }
  //accumulate the pairwise RT overlap of the hull bounding boxes
  DoubleReal overlap = 0.0;
  for (Size a = 0; a < hulls1.size(); ++a)
  {
    const DBoundingBox<2> box1 = hulls1[a].getBoundingBox();
    for (Size b = 0; b < hulls2.size(); ++b)
    {
      const DBoundingBox<2> box2 = hulls2[b].getBoundingBox();
      if (!box1.intersects(box2)) continue;
      const DoubleReal lo1 = box1.minPosition()[0];
      const DoubleReal hi1 = box1.maxPosition()[0];
      const DoubleReal lo2 = box2.minPosition()[0];
      const DoubleReal hi2 = box2.maxPosition()[0];
      if (lo1 <= lo2 && hi1 >= hi2) //box1 contains box2
      {
        overlap += box2.width();
      }
      else if (lo2 <= lo1 && hi2 >= hi1) //box2 contains box1
      {
        overlap += box1.width();
      }
      else if (lo1 <= lo2 && hi1 <= hi2) //the end of box1 overlaps with box2
      {
        overlap += hi1 - lo2;
      }
      else if (lo2 <= lo1 && hi2 <= hi1) //the end of box2 overlaps with box1
      {
        overlap += hi2 - lo1;
      }
    }
  }
  //normalize by the smaller feature's RT span
  return overlap / std::min(span1, span2);
}
/// Returns the precalculated isotope distribution for a certain mass window
/// @throws Exception::InvalidValue if the mass maps to an index beyond the precalculated vector
const TheoreticalIsotopePattern& getIsotopeDistribution_(DoubleReal mass) const
{
  //calculate index of the mass window in the precalculated vector
  Size index = (Size) std::floor(mass / mass_window_width_);
  if (index >= isotope_distributions_.size())
  {
    //the largest valid index is size()-1, not size() (the old message was off by one)
    throw Exception::InvalidValue(__FILE__, __LINE__, __PRETTY_FUNCTION__, "IsotopeDistribution not precalculated. Maximum allowed index is " + String(isotope_distributions_.size() - 1), String(index));
  }
  //Return distribution
  return isotope_distributions_[index];
}
/**
  @brief Finds the best fitting position of the isotopic pattern estimate defined by @p center

  Every peak in an m/z window around the seed is tried as start position for the
  theoretical pattern; the candidate with the highest isotope score that still
  contains the seed peak wins.

  @param center the maximum peak of the isotope distribution (contains charge as well)
  @param charge The charge of the pattern
  @param best_pattern Returns the indices of the isotopic peaks. If a isotopic peak is missing -1 is returned.
  @return The isotope score of the best fit (0.0 if no candidate contained the seed)
*/
DoubleReal findBestIsotopeFit_(const Seed& center, UInt charge, IsotopePattern& best_pattern) const
{
if (debug_) log_ << "Testing isotope patterns for charge " << charge << ": " << std::endl;
const SpectrumType& spectrum = map_[center.spectrum];
//theoretical isotope distribution for mass m/z * charge
const TheoreticalIsotopePattern& isotopes = getIsotopeDistribution_(spectrum[center.peak].getMZ() * charge);
if (debug_) log_ << " - Seed: " << center.peak << " (mz:" << spectrum[center.peak].getMZ() << ")" << std::endl;
//Find m/z boundaries of search space (linear search as this is local and we have the center already)
DoubleReal mass_window = (DoubleReal)(isotopes.size() + 1) / (DoubleReal)charge;
if (debug_) log_ << " - Mass window: " << mass_window << std::endl;
//search end: last peak index within +mass_window of the seed
Size end = center.peak;
while (end < spectrum.size() &&
spectrum[end].getMZ() < spectrum[center.peak].getMZ() + mass_window)
{
++end;
}
--end;
//search begin
//(must be signed: the loop deliberately runs one step past the first matching peak)
SignedSize begin = center.peak;
while (begin >= 0 &&
spectrum[begin].getMZ() > spectrum[center.peak].getMZ() - mass_window)
{
--begin;
}
++begin;
if (debug_) log_ << " - Begin: " << begin << " (mz:" << spectrum[begin].getMZ() << ")" << std::endl;
if (debug_) log_ << " - End: " << end << " (mz:" << spectrum[end].getMZ() << ")" << std::endl;
//fit isotope distribution to peaks
DoubleReal max_score = 0.0;
for (Size start = begin; start <= end; ++start)
{
//find isotope peaks for the current start peak
Size peak_index = start;
IsotopePattern pattern(isotopes.size());
if (debug_) log_ << " - Fitting at " << start << " (mz:" << spectrum[start].getMZ() << ")" << std::endl;
for (Size iso = 0; iso < isotopes.size(); ++iso)
{
DoubleReal pos = spectrum[start].getMZ() + iso / (DoubleReal)charge;
findIsotope_(pos, center.spectrum, pattern, iso, peak_index);
}
//check if the seed is contained, otherwise abort
bool seed_contained = false;
for (Size iso = 0; iso < pattern.peak.size(); ++iso)
{
if (pattern.peak[iso] == (Int)center.peak && pattern.spectrum[iso] == center.spectrum)
{
seed_contained = true;
break;
}
}
if (!seed_contained)
{
if (debug_) log_ << " - aborting: seed is not contained!" << std::endl;
continue;
}
DoubleReal score = isotopeScore_(isotopes, pattern, false);
//check if the seed is still contained, otherwise abort
//(the pattern may have been modified by isotopeScore_, hence the repeated check)
seed_contained = false;
for (Size iso = 0; iso < pattern.peak.size(); ++iso)
{
if (pattern.peak[iso] == (Int)center.peak &&
pattern.spectrum[iso] == center.spectrum)
{
seed_contained = true;
break;
}
}
if (!seed_contained)
{
if (debug_) log_ << " - aborting: seed was removed during isotope fit!" << std::endl;
continue;
}
if (debug_) log_ << " - final score: " << score << std::endl;
//keep the best-scoring candidate
if (score > max_score)
{
max_score = score;
best_pattern = pattern;
}
}
if (debug_) log_ << " - best score : " << max_score << std::endl;
best_pattern.theoretical_pattern = isotopes;
return max_score;
}
/**
  Extends all mass traces of an isotope pattern in one step.

  The trace with the maximum intensity is extended first to determine the RT
  boundaries; all remaining traces of the pattern are then extended only inside
  these boundaries.

  @param pattern The IsotopePattern that should be extended.
  @param traces The MassTraces datastructure where the extended mass traces will be stored in.
  @param meta_index_overall The index of the data array where the quality scores for the given charge are stored.
*/
void extendMassTraces_(const IsotopePattern& pattern, MassTraces& traces, Size meta_index_overall) const
{
  //find index of the trace with the maximum intensity
  DoubleReal max_int = 0.0;
  Size max_trace_index = 0;
  for (Size p = 0; p < pattern.peak.size(); ++p)
  {
    if (pattern.peak[p] < 0) continue; //skip missing (-1) and removed (-2) traces
    if (map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity() > max_int)
    {
      max_int = map_[pattern.spectrum[p]][pattern.peak[p]].getIntensity();
      max_trace_index = p;
    }
  }
  //extend the maximum intensity trace to determine the boundaries in RT dimension
  Size start_index = pattern.spectrum[max_trace_index];
  const PeakType* start_peak = &(map_[pattern.spectrum[max_trace_index]][pattern.peak[max_trace_index]]);
  DoubleReal start_mz = start_peak->getMZ();
  DoubleReal start_rt = map_[start_index].getRT();
  if (debug_) log_ << " - Trace " << max_trace_index << " (maximum intensity)" << std::endl;
  if (debug_) log_ << " - extending from: " << map_[start_index].getRT() << " / " << start_mz << " (int: " << start_peak->getIntensity() << ")" << std::endl;
  //initialize the trace and extend in both RT directions
  MassTrace max_trace;
  max_trace.peaks.push_back(std::make_pair(start_rt, start_peak));
  extendMassTrace_(max_trace, start_index, start_mz, false, meta_index_overall);
  extendMassTrace_(max_trace, start_index, start_mz, true, meta_index_overall);
  DoubleReal rt_max = max_trace.peaks.back().first;
  DoubleReal rt_min = max_trace.peaks.begin()->first;
  if (debug_) log_ << " - rt bounds: " << rt_min << "-" << rt_max << std::endl;
  //Abort if too few peaks were found
  //NOTE(review): '2 * min_spectra_ - max_missing_trace_peaks_' is unsigned arithmetic and
  //wraps around if max_missing_trace_peaks_ > 2 * min_spectra_ — confirm parameter restrictions
  if (!max_trace.isValid() || max_trace.peaks.size() < 2 * min_spectra_ - max_missing_trace_peaks_)
  {
    if (debug_) log_ << " - could not extend trace with maximum intensity => abort" << std::endl;
    return;
  }
  for (Size p = 0; p < pattern.peak.size(); ++p)
  {
    if (debug_) log_ << " - Trace " << p << std::endl;
    if (p == max_trace_index)
    {
      if (debug_) log_ << " - previously extended maximum trace" << std::endl;
      traces.push_back(max_trace);
      traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p];
      traces.max_trace = traces.size() - 1;
      continue;
    }
    Seed starting_peak;
    starting_peak.spectrum = pattern.spectrum[p];
    starting_peak.peak = pattern.peak[p];
    if (pattern.peak[p] == -2)
    {
      if (debug_) log_ << " - removed during isotope fit" << std::endl;
      continue;
    }
    else if (pattern.peak[p] == -1)
    {
      if (debug_) log_ << " - missing" << std::endl;
      continue;
    }
    starting_peak.intensity = map_[starting_peak.spectrum][starting_peak.peak].getIntensity();
    if (debug_) log_ << " - trace seed: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")" << std::endl;
    //search for nearby maximum of the mass trace as the extension assumes that it starts at the maximum
    //BUGFIX: 'starting_peak.spectrum - min_spectra_' is unsigned and wrapped around for seeds in
    //the first min_spectra_ spectra; std::max((Size)0, ...) could not catch the wrap, so the
    //search loop below was silently skipped. Clamp before subtracting instead.
    Size begin = (starting_peak.spectrum >= min_spectra_) ? (starting_peak.spectrum - min_spectra_) : (Size)0;
    Size end = std::min(starting_peak.spectrum + min_spectra_, (Size)map_.size());
    DoubleReal mz = map_[starting_peak.spectrum][starting_peak.peak].getMZ();
    DoubleReal inte = map_[starting_peak.spectrum][starting_peak.peak].getIntensity();
    for (Size spectrum_index = begin; spectrum_index < end; ++spectrum_index)
    {
      //find better seeds (no-empty scan/low mz diff/higher intensity)
      SignedSize peak_index = -1;
      try
      {
        peak_index = map_[spectrum_index].findNearest(map_[starting_peak.spectrum][starting_peak.peak].getMZ());
      }
      catch (...) //no peaks in the spectrum
      {
        peak_index = -1;
      }
      if (peak_index < 0 ||
          map_[spectrum_index][peak_index].getIntensity() <= inte ||
          std::fabs(mz - map_[spectrum_index][peak_index].getMZ()) >= pattern_tolerance_
          )
      {
        continue;
      }
      starting_peak.spectrum = spectrum_index;
      starting_peak.peak = peak_index;
      inte = map_[spectrum_index][peak_index].getIntensity();
    }
    if (debug_) log_ << " - extending from: " << map_[starting_peak.spectrum].getRT() << " / " << map_[starting_peak.spectrum][starting_peak.peak].getMZ() << " (int: " << map_[starting_peak.spectrum][starting_peak.peak].getIntensity() << ")" << std::endl;
    //------------------------------------------------------------------
    //Extend seed to a mass trace
    MassTrace trace;
    const PeakType* seed = &(map_[starting_peak.spectrum][starting_peak.peak]);
    //initialize trace with seed data and extend within the RT bounds of the maximum trace
    trace.peaks.push_back(std::make_pair(map_[starting_peak.spectrum].getRT(), seed));
    extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), false, meta_index_overall, rt_min, rt_max);
    extendMassTrace_(trace, starting_peak.spectrum, seed->getMZ(), true, meta_index_overall, rt_min, rt_max);
    //check if enough peaks were found
    if (!trace.isValid())
    {
      if (debug_) log_ << " - could not extend trace " << std::endl;
      //Missing traces in the middle of a pattern are not acceptable => fix this
      //NOTE(review): traces.max_trace is not reset by clear(); later iterations compare
      //against the stale index — confirm MassTraces::clear() semantics
      if (p < traces.max_trace)
      {
        traces.clear(); //remove earlier traces
        continue;
      }
      else if (p > traces.max_trace)
      {
        break; //no more traces are possible
      }
    }
    traces.push_back(trace);
    traces.back().theoretical_int = pattern.theoretical_pattern.intensity[p];
  }
}
/**
@brief Extends a single mass trace in one RT direction
How to use this method:
- Add the starting peak to the @p trace
- Indicate using @c increase_rt whether to extend in downstream or upstream direction
@param trace The trace that should be extended
@param spectrum_index The index of the spectrum from which on the mass trace should be extended
@param mz The mz location (center) of the trace
@param increase_rt Indicator whether the extension is done in forward or backward direction (with respect to the current spectrum)
@param meta_index_overall The index of the overall score
@param min_rt The rt minimum up to which the trace will be extended.
@param max_rt The rt maximum up to which the trace will be extended.
@note This method assumes that it extends from a local maximum.
@note If @c min_rt or @c max_rt are set to 0.0 no boundary is assumed in the respective direction.
*/
void extendMassTrace_(MassTrace& trace, SignedSize spectrum_index, DoubleReal mz, bool increase_rt, Size meta_index_overall, DoubleReal min_rt = 0.0, DoubleReal max_rt = 0.0) const
{
  // Walks spectrum-by-spectrum in one RT direction, appending the nearest peak at
  // m/z @p mz to @p trace until a stop criterion fires: RT boundary hit, too many
  // consecutive missing peaks, or a rising average intensity slope.
  //Reverse peaks if we run the method for the second time (to keep them in chronological order)
  if (increase_rt)
  {
    ++spectrum_index;
    std::reverse(trace.peaks.begin(), trace.peaks.end());
  }
  else
  {
    --spectrum_index;
  }
  //check if boundaries are set (min_rt == max_rt, e.g. both 0.0, means "no boundary")
  bool boundaries = false;
  if (max_rt != min_rt)
  {
    boundaries = true;
  }
  //Relax slope threshold (factor 2) if there is a hard boundary for the extension
  DoubleReal current_slope_bound = (1.0 + (DoubleReal)boundaries) * slope_bound_;
  Size delta_count = min_spectra_;
  // Sliding window of relative intensity changes; pre-seeded with delta_count-1
  // zeros so the first averages after a push_back cover a full window.
  std::vector<DoubleReal> deltas(delta_count - 1, 0);
  DoubleReal last_observed_intensity = trace.peaks.back().second->getIntensity();
  UInt missing_peaks = 0; // consecutive spectra without an acceptable peak
  Size peaks_before_extension = trace.peaks.size();
  String abort_reason = "";
  while ((!increase_rt && spectrum_index >= 0) || (increase_rt && spectrum_index < (SignedSize)map_.size()))
  {
    // stop at the RT boundary (only if boundaries were supplied)
    if (boundaries &&
        ((!increase_rt && map_[spectrum_index].getRT() < min_rt) ||
         (increase_rt && map_[spectrum_index].getRT() > max_rt))
        )
    {
      abort_reason = "Hit upper/lower boundary";
      break;
    }
    SignedSize peak_index = -1;
    try
    {
      peak_index = map_[spectrum_index].findNearest(mz);
    }
    catch (...) //no peaks in the spectrum
    {
      peak_index = -1;
    }
    // check if the peak is "missing"
    if (
      peak_index < 0 // no peak found
      || map_[spectrum_index].getFloatDataArrays()[meta_index_overall][peak_index] < 0.01 // overall score is too low
      || positionScore_(mz, map_[spectrum_index][peak_index].getMZ(), trace_tolerance_) == 0.0 // deviation of mz is too big
      )
    {
      ++missing_peaks;
      if (missing_peaks > max_missing_trace_peaks_)
      {
        abort_reason = "too many peaks missing";
        break;
      }
    }
    else
    {
      missing_peaks = 0; // an acceptable peak resets the missing-peak counter
      //add found peak to trace
      trace.peaks.push_back(std::make_pair(map_[spectrum_index].getRT(), &(map_[spectrum_index][peak_index])));
      //update deltas and intensities (delta = relative intensity change vs. previous accepted peak)
      deltas.push_back((map_[spectrum_index][peak_index].getIntensity() - last_observed_intensity) / last_observed_intensity);
      last_observed_intensity = map_[spectrum_index][peak_index].getIntensity();
      //Abort if the average delta is too big (as intensity increases then)
      DoubleReal average_delta = std::accumulate(deltas.end() - delta_count, deltas.end(), 0.0) / (DoubleReal)delta_count;
      if (average_delta > current_slope_bound)
      {
        abort_reason = String("Average delta above threshold: ") + average_delta + "/" + current_slope_bound;
        //remove last peaks as we extended too far
        Size remove = std::min((Size)(trace.peaks.size() - peaks_before_extension), delta_count - 1);
        trace.peaks.erase(trace.peaks.end() - remove, trace.peaks.end());
        break;
      }
    }
    //increase/decrease scan index
    if (increase_rt) ++spectrum_index;
    else --spectrum_index;
  }
  if (debug_) log_ << "   - Added " << (trace.peaks.size() - peaks_before_extension) << " peaks (abort: " << abort_reason << ")" << std::endl;
}
/// Returns the index of the peak nearest to m/z @p pos in spectrum @p spec.
/// Linear hill-descent starting at index @p start: advances while the m/z
/// distance strictly decreases and stops at the first non-improving peak.
template <typename SpectrumType>
Size nearest_(DoubleReal pos, const SpectrumType& spec, Size start) const
{
  Size best = start;
  DoubleReal best_distance = std::fabs(pos - spec[best].getMZ());
  for (Size candidate = start + 1; candidate < spec.size(); ++candidate)
  {
    const DoubleReal candidate_distance = std::fabs(pos - spec[candidate].getMZ());
    if (candidate_distance >= best_distance)
    {
      break; // distance no longer shrinking => previous peak was the nearest
    }
    best_distance = candidate_distance;
    best = candidate;
  }
  return best;
}
/**
  @brief Searches for an isotopic peak in the current spectrum and the adjacent spectra

  Looks for a peak near @p pos in the central spectrum and, independently, in the
  directly preceding and following spectra. All matches contribute to the averaged
  intensity and m/z score; the stored peak/spectrum index prefers the central match,
  then the previous, then the next spectrum.

  @param pos m/z position of the searched for peak
  @param spectrum_index index of the central spectrum
  @param pattern IsotopePattern to store found peaks
  @param pattern_index index of the isotope in the pattern
  @param peak_index starting index of the search (to avoid multiple binary searches); updated to the match position in the central spectrum
*/
void findIsotope_(DoubleReal pos, Size spectrum_index, IsotopePattern& pattern, Size pattern_index, Size& peak_index) const
{
  if (debug_) log_ << "   - Isotope " << pattern_index << ": ";
  DoubleReal intensity = 0.0;
  DoubleReal pos_score = 0.0;
  UInt matches = 0;
  //search in the center spectrum
  const SpectrumType& spectrum = map_[spectrum_index];
  peak_index = nearest_(pos, spectrum, peak_index);
  DoubleReal mz_score = positionScore_(pos, spectrum[peak_index].getMZ(), pattern_tolerance_);
  pattern.theoretical_mz[pattern_index] = pos;
  if (mz_score != 0.0) // zero score means the peak is outside the tolerance window
  {
    if (debug_) log_ << String::number(spectrum[peak_index].getIntensity(), 1) << " ";
    pattern.peak[pattern_index] = peak_index;
    pattern.spectrum[pattern_index] = spectrum_index;
    intensity += spectrum[peak_index].getIntensity();
    pos_score += mz_score;
    ++matches;
  }
  //previous spectrum (only if it exists and is not empty)
  if (spectrum_index != 0 && !map_[spectrum_index - 1].empty())
  {
    const SpectrumType& spectrum_before = map_[spectrum_index - 1];
    Size index_before = spectrum_before.findNearest(pos);
    DoubleReal mz_score = positionScore_(pos, spectrum_before[index_before].getMZ(), pattern_tolerance_);
    if (mz_score != 0.0)
    {
      if (debug_) log_ << String::number(spectrum_before[index_before].getIntensity(), 1) << "b ";
      intensity += spectrum_before[index_before].getIntensity();
      pos_score += mz_score;
      ++matches;
      // store this peak only if the central spectrum had no match yet
      if (pattern.peak[pattern_index] == -1)
      {
        pattern.peak[pattern_index] = index_before;
        pattern.spectrum[pattern_index] = spectrum_index - 1;
      }
    }
  }
  //next spectrum (only if it exists and is not empty)
  if (spectrum_index != map_.size() - 1 && !map_[spectrum_index + 1].empty())
  {
    const SpectrumType& spectrum_after = map_[spectrum_index + 1];
    Size index_after = spectrum_after.findNearest(pos);
    DoubleReal mz_score = positionScore_(pos, spectrum_after[index_after].getMZ(), pattern_tolerance_);
    if (mz_score != 0.0)
    {
      if (debug_) log_ << String::number(spectrum_after[index_after].getIntensity(), 1) << "a ";
      intensity += spectrum_after[index_after].getIntensity();
      pos_score += mz_score;
      ++matches;
      // store this peak only if neither the central nor the previous spectrum matched
      if (pattern.peak[pattern_index] == -1)
      {
        pattern.peak[pattern_index] = index_after;
        pattern.spectrum[pattern_index] = spectrum_index + 1;
      }
    }
  }
  //no isotope found in any of the three spectra => mark as missing
  if (matches == 0)
  {
    if (debug_) log_ << " missing" << std::endl;
    pattern.peak[pattern_index] = -1;
    pattern.mz_score[pattern_index] = 0.0;
    pattern.intensity[pattern_index] = 0.0;
  }
  else
  {
    // average score and intensity over all contributing spectra
    if (debug_) log_ << "=> " << intensity / matches << std::endl;
    pattern.mz_score[pattern_index] = pos_score / matches;
    pattern.intensity[pattern_index] = intensity / matches;
  }
}
/// Calculates a score between 0 and 1 for the m/z deviation of two peaks.
/// Within half the allowed deviation the score decays linearly from 1.0 to 0.9;
/// between half and the full allowed deviation it decays linearly from 0.9 to 0.0;
/// beyond the allowed deviation the score is 0.
DoubleReal positionScore_(DoubleReal pos1, DoubleReal pos2, DoubleReal allowed_deviation) const
{
  const DoubleReal diff = fabs(pos1 - pos2);
  const DoubleReal half = 0.5 * allowed_deviation;
  if (diff <= half)
  {
    return 0.1 * (half - diff) / half + 0.9;
  }
  if (diff <= allowed_deviation)
  {
    return 0.9 * (allowed_deviation - diff) / half;
  }
  return 0.0;
}
/// Calculates a score between 0 and 1 for the correlation between theoretical and found isotope pattern.
/// Tries to omit optional isotope peaks at the pattern borders to improve the fit;
/// omitted peaks are marked with -2 in @p pattern.
DoubleReal isotopeScore_(const TheoreticalIsotopePattern& isotopes, IsotopePattern& pattern, bool consider_mz_distances) const
{
  if (debug_) log_ << "   - fitting " << pattern.intensity.size() << " peaks" << std::endl;
  //Abort if a core peak (i.e. non-optional peak) is missing
  for (Size iso = 0 + isotopes.optional_begin; iso < pattern.peak.size() - isotopes.optional_end; ++iso)
  {
    if (pattern.peak[iso] == -1)
    {
      if (debug_) log_ << "   - aborting: core peak is missing" << std::endl;
      return 0.0;
    }
  }
  //Find best isotope fit
  // - try to leave out optional isotope peaks to improve the fit
  // - do not allow gaps inside the pattern
  DoubleReal best_int_score = 0.01; //Not 0 as this would result in problems when checking for the percental improvement
  // Determine the minimal number of optional peaks that must be dropped at the
  // front: scan from the innermost optional position outwards and stop at the
  // first missing peak (no gaps allowed inside the pattern).
  Size best_begin = 0;
  for (Size i = isotopes.optional_begin; i > 0; --i)
  {
    if (pattern.peak[i - 1] == -1)
    {
      best_begin = i;
      break;
    }
  }
  // Same for the minimal number of optional peaks dropped at the back.
  Size best_end = 0;
  for (Size i = isotopes.optional_end; i > 0; --i)
  {
    if (pattern.peak[pattern.peak.size() - i] == -1)
    {
      best_end = i;
      break;
    }
  }
  if (debug_) log_ << "   - best_begin/end: " << best_begin << "/" << best_end << std::endl;
  // Try all allowed combinations of dropped front/back optional peaks and keep
  // the best Pearson correlation between theoretical and observed intensities.
  for (Size b = best_begin; b <= isotopes.optional_begin; ++b)
  {
    for (Size e = best_end; e <= isotopes.optional_end; ++e)
    {
      //Make sure we have more than 2 peaks (unless in the first loop iteration, there we allow two points)
      if (isotopes.size() - b - e > 2 || (b == best_begin &&
                                          e == best_end &&
                                          isotopes.size() - b - e > 1))
      {
        DoubleReal int_score = Math::pearsonCorrelationCoefficient(isotopes.intensity.begin() + b, isotopes.intensity.end() - e, pattern.intensity.begin() + b, pattern.intensity.end() - e);
        if (boost::math::isnan(int_score)) int_score = 0.0; // e.g. constant intensities => undefined correlation
        if (isotopes.size() - b - e == 2 && int_score > min_isotope_fit_) int_score = min_isotope_fit_; //special case for the first loop iteration (otherwise the score is 1)
        if (debug_) log_ << "   - fit (" << b << "/" << e << "): " << int_score;
        // accept only if the fit improves by at least the configured percentage
        if (int_score / best_int_score >= 1.0 + optional_fit_improvement_)
        {
          if (debug_) log_ << " - new best fit ";
          best_int_score = int_score;
          best_begin = b;
          best_end = e;
        }
        if (debug_) log_ << std::endl;
      }
    }
  }
  //if the best fit is empty, abort
  if (pattern.mz_score.size() - best_begin - best_end == 0)
  {
    return 0.0;
  }
  //remove left out peaks from the beginning (mark with -2 and zero out scores)
  for (Size i = 0; i < best_begin; ++i)
  {
    pattern.peak[i] = -2;
    pattern.intensity[i] = 0.0;
    pattern.mz_score[i] = 0.0;
  }
  //remove left out peaks from the end
  for (Size i = 0; i < best_end; ++i)
  {
    pattern.peak[isotopes.size() - 1 - i] = -2;
    pattern.intensity[isotopes.size() - 1 - i] = 0.0;
    pattern.mz_score[isotopes.size() - 1 - i] = 0.0;
  }
  //calculate m/z score (if required): weight the fit by the average m/z score of the kept peaks
  if (consider_mz_distances)
  {
    best_int_score *= std::accumulate(pattern.mz_score.begin() + best_begin, pattern.mz_score.end() - best_end, 0.0) / (pattern.mz_score.size() - best_begin - best_end);
  }
  //return final score
  OPENMS_POSTCONDITION(best_int_score >= 0.0, (String("Internal error: Isotope score (") + best_int_score + ") should be >=0.0").c_str())
  OPENMS_POSTCONDITION(best_int_score <= 1.0, (String("Internal error: Isotope score (") + best_int_score + ") should be <=1.0").c_str())
  return best_int_score;
}
/**
  @brief Compute the intensity score for the peak @p peak in spectrum @p spectrum.

  The intensity score is computed by interpolating the score between the 4 nearest intensity
  bins. The scores from the different bins are weighted by the distance of the bin center to
  the peak.

  @param spectrum Index of the spectrum we are currently looking at
  @param peak Index of the peak that should be scored inside the spectrum @p spectrum
*/
DoubleReal intensityScore_(Size spectrum, Size peak) const
{
  // calculate (half) bin numbers: the RT/m/z range is split into 2*intensity_bins_
  // half-bins so that a peak can be related to the two nearest full bins per dimension
  DoubleReal intensity = map_[spectrum][peak].getIntensity();
  DoubleReal rt = map_[spectrum].getRT();
  DoubleReal mz = map_[spectrum][peak].getMZ();
  DoubleReal rt_min = map_.getMinRT();
  DoubleReal mz_min = map_.getMinMZ();
  UInt rt_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((rt - rt_min) / intensity_rt_step_ * 2.0));
  UInt mz_bin = std::min(2 * intensity_bins_ - 1, (UInt) std::floor((mz - mz_min) / intensity_mz_step_ * 2.0));
  // determine the two neighboring full m/z bins (ml = lower, mh = higher);
  // at the borders both collapse to the same (edge) bin
  UInt ml, mh;
  if (mz_bin == 0 || mz_bin == 2 * intensity_bins_ - 1)
  {
    ml = mz_bin / 2;
    mh = mz_bin / 2;
  }
  else if (Math::isOdd(mz_bin))
  {
    ml = mz_bin / 2;
    mh = mz_bin / 2 + 1;
  }
  else
  {
    ml = mz_bin / 2 - 1;
    mh = mz_bin / 2;
  }
  // determine the two neighboring full RT bins (rl = lower, rh = higher), same scheme
  UInt rl, rh;
  if (rt_bin == 0 || rt_bin == 2 * intensity_bins_ - 1)
  {
    rl = rt_bin / 2;
    rh = rt_bin / 2;
  }
  else if (Math::isOdd(rt_bin))
  {
    rl = rt_bin / 2;
    rh = rt_bin / 2 + 1;
  }
  else
  {
    rl = rt_bin / 2 - 1;
    rh = rt_bin / 2;
  }
  // calculate distances to surrounding bin centers (normalized to [0,1])
  DoubleReal drl = std::fabs(rt_min + (0.5 + rl) * intensity_rt_step_ - rt) / intensity_rt_step_;
  DoubleReal drh = std::fabs(rt_min + (0.5 + rh) * intensity_rt_step_ - rt) / intensity_rt_step_;
  DoubleReal dml = std::fabs(mz_min + (0.5 + ml) * intensity_mz_step_ - mz) / intensity_mz_step_;
  DoubleReal dmh = std::fabs(mz_min + (0.5 + mh) * intensity_mz_step_ - mz) / intensity_mz_step_;
  // Calculate weights for the intensity scores based on the distances to the
  // bin center (the nearer the better)
  DoubleReal d1 = std::sqrt(std::pow(1.0 - drl, 2) + std::pow(1.0 - dml, 2));
  DoubleReal d2 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dml, 2));
  DoubleReal d3 = std::sqrt(std::pow(1.0 - drl, 2) + std::pow(1.0 - dmh, 2));
  DoubleReal d4 = std::sqrt(std::pow(1.0 - drh, 2) + std::pow(1.0 - dmh, 2));
  DoubleReal d_sum = d1 + d2 + d3 + d4;
  // Final score .. intensityScore in the surrounding bins, weighted by the distance of the
  // bin center to the peak
  DoubleReal final = intensityScore_(rl, ml, intensity) * (d1 / d_sum)
                     + intensityScore_(rh, ml, intensity) * (d2 / d_sum)
                     + intensityScore_(rl, mh, intensity) * (d3 / d_sum)
                     + intensityScore_(rh, mh, intensity) * (d4 / d_sum);
  // upper bound uses a small tolerance (1.0001) for accumulated rounding error
  OPENMS_POSTCONDITION(final >= 0.0, (String("Internal error: Intensity score (") + final + ") should be >=0.0").c_str())
  OPENMS_POSTCONDITION(final <= 1.0001, (String("Internal error: Intensity score (") + final + ") should be <=1.0").c_str())
  return final;
}
/**
  @brief Choose the best trace fitter for the current mass traces based on the user parameter
  (symmetric, asymmetric) or based on an inspection of the mass trace (auto)

  @param tau Set to -1.0 when the asymmetric (EGH) fitter is chosen; left untouched otherwise.
  @return A pointer to the trace fitter that should be used (caller takes ownership).
*/
TraceFitter<PeakType>* chooseTraceFitter_(double& tau)
{
  // dispatch on the configured RT peak shape
  const bool asymmetric = (param_.getValue("feature:rt_shape") == "asymmetric");
  if (asymmetric)
  {
    LOG_DEBUG << "use asymmetric rt peak shape" << std::endl;
    tau = -1.0;
    return new EGHTraceFitter<PeakType>();
  }
  // default: symmetric Gaussian shape
  LOG_DEBUG << "use symmetric rt peak shape" << std::endl;
  return new GaussTraceFitter<PeakType>();
}
/// Intensity score inside a single (rt, mz) bin: the intensity is located among the
/// 20-quantiles of the bin and linearly interpolated within its quantile interval,
/// yielding final = 1/20 * [index(vigintile_low) + (intensity-vigintile_low)/(vigintile_high-vigintile_low)].
DoubleReal intensityScore_(Size rt_bin, Size mz_bin, DoubleReal intensity) const
{
  const std::vector<DoubleReal>& quantiles20 = intensity_thresholds_[rt_bin][mz_bin];
  // first quantile that is >= intensity
  std::vector<DoubleReal>::const_iterator pos = std::lower_bound(quantiles20.begin(), quantiles20.end(), intensity);
  if (pos == quantiles20.end())
  {
    // above the largest quantile => maximal score
    return 1.0;
  }
  // interpolated fraction (0..0.05) within the current quantile interval
  DoubleReal within_bin;
  if (pos == quantiles20.begin())
  {
    within_bin = 0.05 * intensity / *pos;
  }
  else
  {
    within_bin = 0.05 * (intensity - *(pos - 1)) / (*pos - *(pos - 1));
  }
  // add 1/20 per full quantile interval below the match position
  DoubleReal score = within_bin + 0.05 * ((pos - quantiles20.begin()) - 1.0);
  // clamp numerical over-/undershoot into [0,1]
  if (score < 0.0) score = 0.0;
  if (score > 1.0) score = 1.0;
  return score;
}
/**
@name Handling of fitted mass traces
Methods to handle the results of the mass trace fitting process.
*/
//@{
/**
  @brief Creates new mass traces @p new_traces based on the fitting result and the
  original traces @p traces.

  Peaks outside the fitted RT bounds are cropped away; traces whose cropped version
  fits the model badly are discarded (traces before the max trace invalidate all
  earlier traces, traces after it end the feature).

  @param fitter The TraceFitter containing the results from the rt profile fitting step.
  @param traces Original mass traces found in the experiment.
  @param new_traces Mass traces created by cropping the original mass traces.
*/
void cropFeature_(TraceFitter<PeakType>* fitter,
                  const MassTraces& traces,
                  MassTraces& new_traces)
{
  DoubleReal low_bound = fitter->getLowerRTBound();
  DoubleReal high_bound = fitter->getUpperRTBound();
  if (debug_) log_ << "    => RT bounds: " << low_bound << " - " << high_bound << std::endl;
  for (Size t = 0; t < traces.size(); ++t)
  {
    const MassTrace& trace = traces[t];
    if (debug_) log_ << "   - Trace " << t << ": (" << trace.theoretical_int << ")" << std::endl;
    MassTrace new_trace;
    //compute average relative deviation and correlation between data and fitted model
    DoubleReal deviation = 0.0;
    std::vector<DoubleReal> v_theo, v_real;
    for (Size k = 0; k < trace.peaks.size(); ++k)
    {
      //consider peaks when inside RT bounds only
      if (trace.peaks[k].first >= low_bound && trace.peaks[k].first <= high_bound)
      {
        new_trace.peaks.push_back(trace.peaks[k]);
        DoubleReal theo = traces.baseline + fitter->computeTheoretical(trace, k);
        v_theo.push_back(theo);
        DoubleReal real = trace.peaks[k].second->getIntensity();
        v_real.push_back(real);
        deviation += std::fabs(real - theo) / theo;
      }
    }
    DoubleReal fit_score = 0.0;
    DoubleReal correlation = 0.0;
    DoubleReal final_score = 0.0;
    if (!new_trace.peaks.empty())
    {
      fit_score = deviation / new_trace.peaks.size();
      correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end()));
      final_score = std::sqrt(correlation * std::max(0.0, 1.0 - fit_score));
    }
    // FIX: previously the correlation was printed twice; the last value is the final score
    if (debug_) log_ << "     - peaks: " << new_trace.peaks.size() << " / " << trace.peaks.size() << " - relative deviation: " << fit_score << " - correlation: " << correlation << " - final score: " << final_score << std::endl;
    //remove badly fitting traces
    if (!new_trace.isValid() || final_score < min_trace_score_)
    {
      if (t < traces.max_trace)
      {
        //a bad trace before the max trace invalidates everything collected so far
        new_traces = MassTraces(); // (redundant duplicate clear() removed)
        if (debug_) log_ << "     - removed this and previous traces due to bad fit" << std::endl;
        continue;
      }
      else if (t == traces.max_trace)
      {
        new_traces = MassTraces();
        if (debug_) log_ << "     - aborting (max trace was removed)" << std::endl;
        break;
      }
      else if (t > traces.max_trace)
      {
        if (debug_) log_ << "     - removed due to bad fit => omitting the rest" << std::endl;
        break; //no more traces are possible
      }
    }
    //add new trace
    else
    {
      new_trace.theoretical_int = trace.theoretical_int;
      new_traces.push_back(new_trace);
      if (t == traces.max_trace)
      {
        new_traces.max_trace = new_traces.size() - 1;
      }
    }
  }
  new_traces.baseline = traces.baseline;
}
/**
  @brief Checks the feature based on different score thresholds and model constraints

  The checks short-circuit: after the first failure no further check runs, so
  @p error_msg always reports the first reason the feature became invalid.

  Feature can get invalid for following reasons:
  <ul>
  <li>Invalid fit: Fitted model is bigger than 'max_rt_span'</li>
  <li>Invalid feature after fit - too few traces or peaks left</li>
  <li>Invalid fit: Center outside of feature bounds</li>
  <li>Invalid fit: Less than 'min_rt_span' left after fit</li>
  <li>Feature quality too low after fit</li>
  </ul>

  @param fitter The TraceFitter containing the results from the rt profile fitting step.
  @param feature_traces Cropped feature mass traces.
  @param seed_mz Mz of the seed
  @param min_feature_score Minimal required feature score
  @param error_msg Will be filled with the error message, if the feature is invalid
  @param fit_score Will be filled with the fit score
  @param correlation Will be filled with correlation between feature and model
  @param final_score Will be filled with the final score
  @return true if the feature is valid
*/
bool checkFeatureQuality_(TraceFitter<PeakType>* fitter,
                          MassTraces& feature_traces,
                          const DoubleReal& seed_mz, const DoubleReal& min_feature_score,
                          String& error_msg, DoubleReal& fit_score, DoubleReal& correlation, DoubleReal& final_score)
{
  bool feature_ok = true;
  //check if the sigma fit was ok (if it is larger than 'max_rt_span')
  // 5.0 * sigma > max_rt_span_ * region_rt_span
  // (FIX: dropped the always-true 'if (feature_ok)' guard around the first check)
  if (fitter->checkMaximalRTSpan(max_rt_span_))
  {
    feature_ok = false;
    error_msg = "Invalid fit: Fitted model is bigger than 'max_rt_span'";
  }
  //check if the feature is valid
  // (FIX: now guarded like the other checks so it cannot overwrite an earlier error message)
  if (feature_ok && !feature_traces.isValid(seed_mz, trace_tolerance_))
  {
    feature_ok = false;
    error_msg = "Invalid feature after fit - too few traces or peaks left";
  }
  //check if x0 is inside feature bounds
  if (feature_ok)
  {
    std::pair<DoubleReal, DoubleReal> rt_bounds = feature_traces.getRTBounds();
    if (fitter->getCenter() < rt_bounds.first || fitter->getCenter() > rt_bounds.second)
    {
      feature_ok = false;
      error_msg = "Invalid fit: Center outside of feature bounds";
    }
  }
  //check if the remaining traces fill out at least 'min_rt_span' of the RT span
  if (feature_ok)
  {
    std::pair<DoubleReal, DoubleReal> rt_bounds = feature_traces.getRTBounds();
    if (fitter->checkMinimalRTSpan(rt_bounds, min_rt_span_))
    {
      feature_ok = false;
      error_msg = "Invalid fit: Less than 'min_rt_span' left after fit";
    }
  }
  //check if feature quality is high enough (average relative deviation and correlation of the whole feature)
  if (feature_ok)
  {
    std::vector<DoubleReal> v_theo, v_real;
    DoubleReal deviation = 0.0;
    for (Size t = 0; t < feature_traces.size(); ++t)
    {
      MassTrace& trace = feature_traces[t];
      for (Size k = 0; k < trace.peaks.size(); ++k)
      {
        // was DoubleReal theo = new_traces.baseline + trace.theoretical_int * height * exp(-0.5 * pow(trace.peaks[k].first - x0, 2) / pow(sigma, 2) );
        DoubleReal theo = feature_traces.baseline + fitter->computeTheoretical(trace, k);
        v_theo.push_back(theo);
        DoubleReal real = trace.peaks[k].second->getIntensity();
        v_real.push_back(real);
        deviation += std::fabs(real - theo) / theo;
      }
    }
    fit_score = std::max(0.0, 1.0 - (deviation / feature_traces.getPeakCount()));
    correlation = std::max(0.0, Math::pearsonCorrelationCoefficient(v_theo.begin(), v_theo.end(), v_real.begin(), v_real.end()));
    final_score = std::sqrt(correlation * fit_score);
    if (final_score < min_feature_score)
    {
      feature_ok = false;
      error_msg = "Feature quality too low after fit";
    }
    //quality output
    if (debug_)
    {
      log_ << "Quality estimation:" << std::endl;
      log_ << " - relative deviation: " << fit_score << std::endl;
      log_ << " - correlation: " << correlation << std::endl;
      log_ << " => final score: " << final_score << std::endl;
    }
  }
  return feature_ok;
}
/**
  @brief Creates several files containing plots and viewable data of the fitted mass trace

  Writes three artifacts per feature: a .dta file with the raw trace data, an optional
  _cropped.dta file with the cropped data, and a gnuplot .plot script that overlays
  data points and fitted model functions. Traces are laid out side-by-side on a
  pseudo-RT axis (offset by @c debug:pseudo_rt_shift per trace).

  @param fitter The TraceFitter containing the results from the rt profile fitting step.
  @param traces Original mass traces found in the spectra
  @param new_traces Cropped feature mass traces
  @param feature_ok Status of the feature
  @param error_msg If the feature is invalid, @p error_msg contains the reason
  @param final_score Final score of the feature
  @param plot_nr Index of the feature
  @param peak The Seed Peak
  @param path The path where to put the debug files (default is debug/features)
*/
void writeFeatureDebugInfo_(TraceFitter<PeakType>* fitter,
                            const MassTraces& traces,
                            const MassTraces& new_traces,
                            bool feature_ok, const String error_msg, const DoubleReal final_score, const Int plot_nr, const PeakType& peak,
                            const String path = "debug/features/")
{
  DoubleReal pseudo_rt_shift = param_.getValue("debug:pseudo_rt_shift");
  TextFile tf;
  //gnuplot script (built up incrementally while the data files are written)
  String script = String("plot \"") + path + plot_nr + ".dta\" title 'before fit (RT: " + String::number(fitter->getCenter(), 2) + " m/z: " + String::number(peak.getMZ(), 4) + ")' with points 1";
  //feature before fit: one data point per peak, traces offset on the pseudo-RT axis
  for (Size k = 0; k < traces.size(); ++k)
  {
    for (Size j = 0; j < traces[k].peaks.size(); ++j)
    {
      tf.push_back(String(pseudo_rt_shift * k + traces[k].peaks[j].first) + "\t" + traces[k].peaks[j].second->getIntensity());
    }
  }
  tf.store(path + plot_nr + ".dta");
  //fitted feature (only if cropping left any peaks)
  if (new_traces.getPeakCount() != 0)
  {
    tf.clear();
    for (Size k = 0; k < new_traces.size(); ++k)
    {
      for (Size j = 0; j < new_traces[k].peaks.size(); ++j)
      {
        tf.push_back(String(pseudo_rt_shift * k + new_traces[k].peaks[j].first) + "\t" + new_traces[k].peaks[j].second->getIntensity());
      }
    }
    tf.store(path + plot_nr + "_cropped.dta");
    script = script + ", \"" + path + plot_nr + "_cropped.dta\" title 'feature ";
    //label the cropped plot with the error reason or the feature number and score
    if (!feature_ok)
    {
      script = script + " - " + error_msg;
    }
    else
    {
      script = script + (features_->size() + 1) + " (score: " + String::number(final_score, 3) + ")";
    }
    script = script + "' with points 3";
  }
  //fitted functions: one gnuplot function per trace, named f(x), g(x), ...
  tf.clear();
  for (Size k = 0; k < traces.size(); ++k)
  {
    char fun = 'f';
    fun += (char)k;
    tf.push_back(fitter->getGnuplotFormula(traces[k], fun, traces.baseline, pseudo_rt_shift * k));
    //tf.push_back(String(fun)+"(x)= " + traces.baseline + " + " + fitter->getGnuplotFormula(traces[k], pseudo_rt_shift * k));
    script = script + ", " + fun + "(x) title 'Trace " + k + " (m/z: " + String::number(traces[k].getAvgMZ(), 4) + ")'";
  }
  //output: finalize and store the gnuplot script
  tf.push_back("set xlabel \"pseudo RT (mass traces side-by-side)\"");
  tf.push_back("set ylabel \"intensity\"");
  tf.push_back("set samples 1000");
  tf.push_back(script);
  tf.push_back("pause -1");
  tf.store(path + plot_nr + ".plot");
}
//@}
private:
/// Not implemented
FeatureFinderAlgorithmPicked& operator=(const FeatureFinderAlgorithmPicked&);
/// Not implemented
FeatureFinderAlgorithmPicked(const FeatureFinderAlgorithmPicked&);
};
} // namespace OpenMS
#endif // OPENMS_TRANSFORMATIONS_FEATUREFINDER_FEATUREFINDERALGORITHMPICKED_H
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.