source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__lnot_int32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_uint8
// op(A') function: GB_tran__lnot_int32_uint8
// C type: int32_t
// A type: uint8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot ((int32_t) Ax [p]) for p = 0..anz-1, in parallel.
// Cx and Ax may be aliased since the operation is elementwise in place.
GrB_Info GB_unop__lnot_int32_uint8
(
    int32_t *Cx,        // output array (may alias Ax)
    uint8_t *Ax,        // input array
    int64_t anz,        // number of entries
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p], typecast to int32_t, then apply the logical not
        GB_GETA (aij, Ax, p) ;
        GB_CASTING (z, aij) ;
        GB_OP (GB_CX (p), z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose A, typecast uint8_t -> int32_t, and apply
   the logical-not operator.  The loop body lives in the textually-included
   template GB_unaryop_transpose.c, specialized by the GB_* macros defined
   above in this file. */
GrB_Info GB_tran__lnot_int32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
/* this operator/type combination was disabled at compile time (GB_control.h);
   the caller falls back to the generic implementation */
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
/* Return the identifier tag for this vector implementation (OpenMP).
   The argument is unused; the ID is a compile-time constant. */
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
return SUNDIALS_NVEC_OPENMP;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
/* Create an empty OpenMP vector: allocate the N_Vector shell and its content
   struct, wire up the operations table, and record length/num_threads.
   No data array is allocated (data == NULL, own_data == SUNFALSE).
   Returns NULL on allocation failure. */
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
N_Vector v;
N_VectorContent_OpenMP content;
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_OpenMP;
v->ops->nvclone = N_VClone_OpenMP;
v->ops->nvcloneempty = N_VCloneEmpty_OpenMP;
v->ops->nvdestroy = N_VDestroy_OpenMP;
v->ops->nvspace = N_VSpace_OpenMP;
v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
v->ops->nvgetlength = N_VGetLength_OpenMP;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_OpenMP;
v->ops->nvconst = N_VConst_OpenMP;
v->ops->nvprod = N_VProd_OpenMP;
v->ops->nvdiv = N_VDiv_OpenMP;
v->ops->nvscale = N_VScale_OpenMP;
v->ops->nvabs = N_VAbs_OpenMP;
v->ops->nvinv = N_VInv_OpenMP;
v->ops->nvaddconst = N_VAddConst_OpenMP;
v->ops->nvdotprod = N_VDotProd_OpenMP;
v->ops->nvmaxnorm = N_VMaxNorm_OpenMP;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
v->ops->nvmin = N_VMin_OpenMP;
v->ops->nvwl2norm = N_VWL2Norm_OpenMP;
v->ops->nvl1norm = N_VL1Norm_OpenMP;
v->ops->nvcompare = N_VCompare_OpenMP;
v->ops->nvinvtest = N_VInvTest_OpenMP;
v->ops->nvconstrmask = N_VConstrMask_OpenMP;
v->ops->nvminquotient = N_VMinQuotient_OpenMP;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction kernels */
/* in a shared-memory (single process) context, the "local" reductions are
   the same as the global ones, so the global implementations are reused */
v->ops->nvdotprodlocal = N_VDotProd_OpenMP;
v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMP;
v->ops->nvminlocal = N_VMin_OpenMP;
v->ops->nvl1normlocal = N_VL1Norm_OpenMP;
v->ops->nvinvtestlocal = N_VInvTest_OpenMP;
v->ops->nvconstrmasklocal = N_VConstrMask_OpenMP;
v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMP;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = length;
content->num_threads = num_threads;
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* Create a fully-allocated OpenMP vector of the given length.
   The data array is owned by the vector and freed by N_VDestroy_OpenMP.
   Returns NULL if any allocation fails. */
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
  N_Vector v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) { return(NULL); }

  if (length > 0) {
    realtype *data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) {
      N_VDestroy_OpenMP(v);
      return(NULL);
    }
    /* this vector owns the buffer and must free it on destroy */
    NV_OWN_DATA_OMP(v) = SUNTRUE;
    NV_DATA_OMP(v) = data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* Wrap a user-supplied data array in an OpenMP vector.
   The vector does NOT take ownership of v_data; the caller must free it. */
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
  N_Vector v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) { return(NULL); }

  if (length > 0) {
    NV_OWN_DATA_OMP(v) = SUNFALSE; /* caller retains ownership */
    NV_DATA_OMP(v) = v_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* Create an array of `count` full clones of w.
   Returns NULL if count <= 0 or any allocation fails; on partial failure all
   vectors created so far are destroyed and the array is freed. */
N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
  N_Vector* vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VClone_OpenMP(w);
    if (vs[j] == NULL) {
      /* FIX: j vectors (indices 0..j-1) were created before this failure, so
         destroy all j of them.  The previous code passed j-1, which destroyed
         only vs[0..j-2] and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* Create an array of `count` empty clones of w (no data arrays attached).
   Returns NULL if count <= 0 or any allocation fails; on partial failure all
   vectors created so far are destroyed and the array is freed. */
N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
  N_Vector* vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VCloneEmpty_OpenMP(w);
    if (vs[j] == NULL) {
      /* FIX: j vectors (indices 0..j-1) were created before this failure, so
         destroy all j of them.  The previous code passed j-1, which destroyed
         only vs[0..j-2] and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
/* Destroy the first `count` vectors in vs, then free the array itself. */
void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count)
{
  int k;
  for (k = 0; k < count; k++) {
    N_VDestroy_OpenMP(vs[k]);
  }
  free(vs);
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
/* Number of elements in v. */
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
  sunindextype len = NV_LENGTH_OMP(v);
  return(len);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
/* Print x to stdout; thin wrapper over N_VPrintFile_OpenMP. */
void N_VPrint_OpenMP(N_Vector x)
{
N_VPrintFile_OpenMP(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
/* Write each component of x to outfile, one per line, followed by a
   trailing blank line.  The format specifier is chosen at compile time to
   match the configured realtype precision. */
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
  sunindextype k;
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *data = NV_DATA_OMP(x);

  for (k = 0; k < len; k++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%11.8Lg\n", data[k]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%11.8g\n", data[k]);
#else
    fprintf(outfile, "%11.8g\n", data[k]);
#endif
  }
  fprintf(outfile, "\n");
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
/* Clone the structure of w (ops table + content metadata) without allocating
   or attaching a data array.  Length and thread count are copied from w.
   Returns NULL if w is NULL or any allocation fails. */
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
N_Vector v;
N_VectorContent_OpenMP content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations (copied from w; nonzero return means failure) */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content: no data yet, so the clone owns nothing */
content->length = NV_LENGTH_OMP(w);
content->num_threads = NV_NUM_THREADS_OMP(w);
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* Clone w, allocating a fresh data array of the same length.
   The clone owns its data array; contents are NOT copied. */
N_Vector N_VClone_OpenMP(N_Vector w)
{
  N_Vector v = N_VCloneEmpty_OpenMP(w);
  if (v == NULL) { return(NULL); }

  {
    sunindextype len = NV_LENGTH_OMP(w);
    if (len > 0) {
      realtype *data = (realtype *) malloc(len * sizeof(realtype));
      if (data == NULL) {
        N_VDestroy_OpenMP(v);
        return(NULL);
      }
      /* the clone owns this buffer */
      NV_OWN_DATA_OMP(v) = SUNTRUE;
      NV_DATA_OMP(v) = data;
    }
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
/* Free the vector: its data array (only if owned), its content struct,
   its ops table, and the shell itself.  Safe to call with NULL. */
void N_VDestroy_OpenMP(N_Vector v)
{
  if (v == NULL) { return; }

  if (v->content != NULL) {
    /* release the data buffer only when this vector owns it */
    if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) {
      free(NV_DATA_OMP(v));
      NV_DATA_OMP(v) = NULL;
    }
    free(v->content);
    v->content = NULL;
  }

  if (v->ops != NULL) {
    free(v->ops);
    v->ops = NULL;
  }

  free(v);
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
/* Report storage requirements: lrw = real words (the data array),
   liw = integer words (the stored length). */
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
*lrw = NV_LENGTH_OMP(v);
*liw = 1;
return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
/* Return the raw data pointer of v (may be NULL for an empty vector). */
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
  realtype *data = (realtype *) NV_DATA_OMP(v);
  return(data);
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
/* Point v at a caller-supplied data array (no-op for zero-length vectors).
   Ownership of the previous buffer is not changed or released here. */
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
  if (NV_LENGTH_OMP(v) > 0) {
    NV_DATA_OMP(v) = v_data;
  }
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
/* z = a*x + b*y, dispatching to specialized kernels for the common
   coefficient/aliasing patterns (axpy, sum, difference, scaled forms) and
   falling back to the general fused loop.  The order of the tests matters:
   aliasing cases must be caught before the pure-coefficient cases. */
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd, *yd, *zd;
N_Vector v1, v2;
booleantype test;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMP(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMP(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMP(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
/* note: test records which operand carries the -1 so the subtraction
   below is always minuend - subtrahend */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMP(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMP(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMP(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMP(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMP(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+(b*yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
/* Fill z with the constant c: z[i] = c for all i. */
void N_VConst_OpenMP(realtype c, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(z);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,c,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(z))
  for (k = 0; k < len; k++) {
    out[k] = c;
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
/* Componentwise product: z[i] = x[i] * y[i]. */
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *a = NV_DATA_OMP(x);
  realtype *b = NV_DATA_OMP(y);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,a,b,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = a[k] * b[k];
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
/* Componentwise quotient: z[i] = x[i] / y[i].
   Caller guarantees y has no zero components (no check here, matching the
   NVECTOR contract). */
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *num = NV_DATA_OMP(x);
  realtype *den = NV_DATA_OMP(y);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,num,den,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = num[k] / den[k];
  }
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
/* z = c*x, with shortcuts for the in-place, copy, and negate cases. */
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  if (z == x) { /* BLAS-style in-place scale: x <- c*x */
    VScaleBy_OpenMP(c, x);
    return;
  }

  if (c == ONE) { /* plain copy */
    VCopy_OpenMP(x, z);
    return;
  }

  if (c == -ONE) { /* negation */
    VNeg_OpenMP(x, z);
    return;
  }

  { /* general scale */
    sunindextype k = 0; /* quiet clang's uninitialized-use warning */
    sunindextype len = NV_LENGTH_OMP(x);
    realtype *in  = NV_DATA_OMP(x);
    realtype *out = NV_DATA_OMP(z);

    #pragma omp parallel for default(none) private(k) shared(len,c,in,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++) {
      out[k] = c * in[k];
    }
  }
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
/* z[i] = |x[i]| for all i. */
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;
  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;
  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);
  /* Consistency fix: every other kernel in this file gives its parallel-for
     an explicit default(none) data-sharing list; this one previously relied
     on implicit sharing.  The clauses make the same sharing explicit, so
     behavior is unchanged. */
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = SUNRabs(xd[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
/* Componentwise reciprocal: z[i] = 1/x[i].
   No zero check — callers needing one use N_VInvTest_OpenMP. */
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *in  = NV_DATA_OMP(x);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,in,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = ONE / in[k];
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
/* Shift every component by a scalar: z[i] = x[i] + b. */
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *in  = NV_DATA_OMP(x);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,b,in,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = in[k] + b;
  }
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
/* Dot product sum_i x[i]*y[i], accumulated with an OpenMP reduction. */
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *a = NV_DATA_OMP(x);
  realtype *b = NV_DATA_OMP(y);
  realtype acc = ZERO;

  #pragma omp parallel for default(none) private(k) shared(len,a,b) reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += a[k] * b[k];
  }
  return(acc);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
/* Max norm: max_i |x[i]|.  Each thread computes a private maximum (tmax)
   over its chunk, then the shared result is updated inside a critical
   section (max is not a built-in OpenMP reduction in older standards). */
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
sunindextype i, N;
realtype tmax, max, *xd;
i = 0; /* initialize to suppress clang warning */
max = ZERO;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmax = ZERO;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
}
/* merge this thread's partial maximum into the shared result */
#pragma omp critical
{
if (tmax > max)
max = tmax;
}
}
return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
/* Weighted RMS norm: sqrt( sum_i (x[i]*w[i])^2 / N ). */
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  realtype sqr_sum = N_VWSqrSumLocal_OpenMP(x, w);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
/* Masked weighted RMS norm: like N_VWrmsNorm_OpenMP but only components
   with id[i] > 0 contribute to the sum (N is still the full length). */
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  realtype sqr_sum = N_VWSqrSumMaskLocal_OpenMP(x, w, id);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
/* Minimum component of x.  Each thread scans its chunk into a private tmin
   (seeded with xd[0]); the shared minimum is merged with a double-checked
   critical section: the unsynchronized outer test only skips threads that
   cannot improve the result, and the test is repeated under the lock. */
realtype N_VMin_OpenMP(N_Vector x)
{
sunindextype i, N;
realtype min, *xd;
realtype tmin;
i = 0; /* initialize to suppress clang warning */
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
min = xd[0];
#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmin = xd[0];
/* loop starts at 1: element 0 already seeds both min and tmin */
#pragma omp for schedule(static)
for (i = 1; i < N; i++) {
if (xd[i] < tmin) tmin = xd[i];
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
/* Weighted L2 norm: sqrt( sum_i (x[i]*w[i])^2 ). */
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *val = NV_DATA_OMP(x);
  realtype *wt  = NV_DATA_OMP(w);
  realtype acc = ZERO;

  #pragma omp parallel for default(none) private(k) shared(len,val,wt) reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNSQR(val[k] * wt[k]);
  }
  return(SUNRsqrt(acc));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
/* L1 norm: sum_i |x[i]|. */
realtype N_VL1Norm_OpenMP(N_Vector x)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *val = NV_DATA_OMP(x);
  realtype acc = ZERO;

  #pragma omp parallel for default(none) private(k) shared(len,val) reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNRabs(val[k]);
  }
  return(acc);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
/* Threshold test: z[i] = 1 when |x[i]| >= c, otherwise 0. */
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *in  = NV_DATA_OMP(x);
  realtype *out = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,c,in,out) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    out[k] = (SUNRabs(in[k]) >= c) ? ONE : ZERO;
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* z[i] = 1/x[i] for all i with x[i] != 0.  Returns SUNFALSE if any x[i] was
   zero (the corresponding z[i] is left unmodified), SUNTRUE otherwise. */
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd, val;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
val = ZERO;
/* NOTE(review): val is written by multiple threads without synchronization.
   All writers store the same value (ONE), like the acknowledged race in
   N_VConstrMask_OpenMP below, but it is still a data race under the OpenMP
   memory model — consider a reduction(max:val) if tightening this. */
#pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
if (xd[i] == ZERO)
val = ONE;
else
zd[i] = ONE/xd[i];
}
if (val > ZERO)
return (SUNFALSE);
else
return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Constraint check: for each i, c[i] encodes the constraint on x[i]
   (|c|>1.5: strict sign constraint, |c|>0.5: non-strict sign constraint,
   c==0: unconstrained).  m[i] is set to 1 where the constraint is violated,
   0 otherwise.  Returns SUNFALSE if any violation occurred. */
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
sunindextype i, N;
realtype temp;
realtype *cd, *xd, *md;
booleantype test;
i = 0; /* initialize to suppress clang warning */
cd = xd = md = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
cd = NV_DATA_OMP(c);
md = NV_DATA_OMP(m);
temp = ZERO;
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md,temp) \
schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
md[i] = ZERO;
/* Continue if no constraints were set for the variable */
if (cd[i] == ZERO)
continue;
/* Check if a set constraint has been violated */
test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
(SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO);
if (test) {
temp = md[i] = ONE; /* Here is a race to write to temp */
}
}
/* Return false if any constraint was violated */
return (temp == ONE) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
/* Minimum quotient: min_i num[i]/denom[i] over components with denom[i] != 0.
   Returns BIG_REAL when every denominator is zero.  Per-thread minima are
   merged via a double-checked critical section, as in N_VMin_OpenMP. */
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
sunindextype i, N;
realtype *nd, *dd, min, tmin, val;
i = 0; /* initialize to suppress clang warning */
nd = dd = NULL;
N = NV_LENGTH_OMP(num);
nd = NV_DATA_OMP(num);
dd = NV_DATA_OMP(denom);
min = BIG_REAL;
#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
num_threads(NV_NUM_THREADS_OMP(num))
{
tmin = BIG_REAL;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
/* zero denominators are skipped entirely */
if (dd[i] != ZERO) {
val = nd[i]/dd[i];
if (val < tmin) tmin = val;
}
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
/* Weighted squared sum: sum_i (x[i]*w[i])^2 (no sqrt, no division by N). */
realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *val = NV_DATA_OMP(x);
  realtype *wt  = NV_DATA_OMP(w);
  realtype acc = ZERO;

  #pragma omp parallel for default(none) private(k) shared(len,val,wt) reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    acc += SUNSQR(val[k] * wt[k]);
  }
  return(acc);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
/* Masked weighted squared sum: sum over i with id[i] > 0 of (x[i]*w[i])^2. */
realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype k = 0; /* quiet clang's uninitialized-use warning */
  sunindextype len = NV_LENGTH_OMP(x);
  realtype *val  = NV_DATA_OMP(x);
  realtype *wt   = NV_DATA_OMP(w);
  realtype *mask = NV_DATA_OMP(id);
  realtype acc = ZERO;

  #pragma omp parallel for default(none) private(k) shared(len,val,wt,mask) reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    if (mask[k] > ZERO) {
      acc += SUNSQR(val[k] * wt[k]);
    }
  }
  return(acc);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* Fused linear combination: z = sum_{i=0..nvec-1} c[i]*X[i].
   Returns 0 on success, -1 if nvec < 1.  Small nvec delegates to
   N_VScale / N_VLinearSum.  Three cases are distinguished by whether z
   aliases X[0], so the accumulation never reads stale output data. */
int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
int i;
sunindextype j, N;
realtype* zd=NULL;
realtype* xd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMP(c[0], X[0], z);
return(0);
}
/* should have called N_VLinearSum */
if (nvec == 2) {
N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(z);
zd = NV_DATA_OMP(z);
/*
* X[0] += c[i]*X[i], i = 1,...,nvec-1
*/
/* z aliases X[0] and c[0]==1: accumulate the rest directly into z */
if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
/*
* X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
*/
/* z aliases X[0] with general c[0]: scale z in place first, then add */
if (X[0] == z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] *= c[0];
}
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
/*
* z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
*/
/* no aliasing: initialize z from the first term, then accumulate */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
xd = NV_DATA_OMP(X[0]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c[0] * xd[j];
}
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
/* Fused multi-axpy: Z[i] = Y[i] + a[i]*x for i = 0..nvec-1.
   Returns 0 on success, -1 if nvec < 1.  When Z aliases Y the update is
   performed in place. */
int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
/*
* Y[i][j] += a[i] * x[j]
*/
/* in-place case: accumulate directly into Y */
if (Y == Z) {
#pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
yd[j] += a[i] * xd[j];
}
}
}
return(0);
}
/*
* Z[i][j] = Y[i][j] + a[i] * x[j]
*/
#pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = a[i] * xd[j] + yd[j];
}
}
}
return(0);
}
/* Fused multi dot product: dotprods[i] = <x, Y[i]> for i = 0..nvec-1.
   Returns 0 on success, -1 if nvec < 1.  Each thread accumulates a private
   partial sum per vector and merges it into dotprods[i] in a critical
   section. */
int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
int i;
sunindextype j, N;
realtype sum;
realtype* xd=NULL;
realtype* yd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VDotProd */
if (nvec == 1) {
dotprods[0] = N_VDotProd_OpenMP(x, Y[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
/* initialize dot products */
for (i=0; i<nvec; i++) {
dotprods[i] = ZERO;
}
/* compute multiple dot products */
#pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
sum = ZERO; /* per-thread partial sum for this vector */
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
sum += xd[j] * yd[j];
}
/* merge this thread's contribution */
#pragma omp critical
{
dotprods[i] += sum;
}
}
}
return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* Computes Z[i] = a*X[i] + b*Y[i] for i = 0..nvec-1.
 * Dispatches to specialized private kernels for common coefficient values
 * (a or b equal to +/-1, a == b, a == -b) and for aliased outputs before
 * falling back to the general case.  The order of the tests below matters:
 * the axpy aliasing cases must be checked first.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VLinearSumVectorArray_OpenMP(int nvec,
                                   realtype a, N_Vector* X,
                                   realtype b, N_Vector* Y,
                                   N_Vector* Z)
{
  int          i;
  sunindextype j, N;
  realtype*    xd=NULL;
  realtype*    yd=NULL;
  realtype*    zd=NULL;
  realtype     c;
  N_Vector*    V1;
  N_Vector*    V2;
  booleantype  test;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMP(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMP(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMP(nvec, X, Y, Z));

  /* Cases:                    */
  /* (1) a == 1.0, b = -1.0,   */
  /* (2) a == -1.0, b == 1.0   */
  /* the assignment to 'test' records which case matched, selecting the
     operand order for the difference V2 - V1 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /* (1) a == 1.0, b == other or 0.0,                        */
  /* (2) a == other or 0.0, b == 1.0                         */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /* (1) a == -1.0, b != 1.0,   */
  /* (2) a != 1.0, b == -1.0    */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Case: a == b                                                         */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /* (1) a == other, b == 0.0 - user should have called N_VScale   */
  /* (2) a == 0.0, b == other - user should have called N_VScale   */
  /* (3) a,b == other, a !=b, a != -b                              */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /* compute linear sum for each vector pair in vector arrays */
  #pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \
    num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
      #pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a * xd[j] + b * yd[j];
      }
    }
  }

  return(0);
}
/* Scales each vector in an array by its own coefficient:
 *   Z[i] = c[i] * X[i],  i = 0..nvec-1.
 * Uses an in-place kernel when X and Z alias (same array pointer).
 * Returns 0 on success, -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int          i;
  sunindextype j, N;
  realtype*    xd=NULL;
  realtype*    zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[i] *= c[i]  (in place, output aliases input)
   */
  if (X == Z) {
    #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \
      num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
        #pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          xd[j] *= c[i];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i] = c[i] * X[i]
   */
  #pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
    num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      zd = NV_DATA_OMP(Z[i]);
      #pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = c[i] * xd[j];
      }
    }
  }

  return(0);
}
/* Sets every element of every vector in the array to the constant c:
 *   Z[v][k] = c,  v = 0..nvec-1, k = 0..len-1.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    zdata=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* a single vector is just N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMP(c, Z[0]);
    return(0);
  }

  /* all vectors share the same length */
  len = NV_LENGTH_OMP(Z[0]);

  /* fill every vector in the array with the constant */
  #pragma omp parallel default(none) private(v,k,zdata) shared(nvec,Z,len,c) \
    num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (v=0; v<nvec; v++) {
      zdata = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zdata[k] = c;
      }
    }
  }

  return(0);
}
/* Computes the weighted root-mean-square norm of each vector in X with
 * its weight vector in W:
 *   nrm[i] = sqrt( (1/N) * sum_j (X[i][j]*W[i][j])^2 ),  i = 0..nvec-1.
 * Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int          i;
  sunindextype j, N;
  realtype     sum;
  realtype*    wd=NULL;
  realtype*    xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /* initialize norms */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* accumulate the sum of squares for each vector in the vector array */
  #pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd  = NV_DATA_OMP(X[i]);
      wd  = NV_DATA_OMP(W[i]);
      /* per-thread partial sum over this thread's chunk of j */
      sum = ZERO;
      #pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += SUNSQR(xd[j] * wd[j]);
      }
      /* serialize the reduction into the shared accumulator nrm[i] */
      #pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finish: scale by 1/N and take the square root (serial, cheap) */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }

  return(0);
}
/* Computes the masked weighted root-mean-square norm of each vector in X:
 * only entries j where id[j] > 0 contribute to the sum, but the divisor is
 * still the full vector length N:
 *   nrm[i] = sqrt( (1/N) * sum_{j : id[j]>0} (X[i][j]*W[i][j])^2 ).
 * Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W,
                                      N_Vector id, realtype* nrm)
{
  int          i;
  sunindextype j, N;
  realtype     sum;
  realtype*    wd=NULL;
  realtype*    xd=NULL;
  realtype*    idd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N   = NV_LENGTH_OMP(X[0]);
  idd = NV_DATA_OMP(id);

  /* initialize norms */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* accumulate the masked sum of squares for each vector */
  #pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd  = NV_DATA_OMP(X[i]);
      wd  = NV_DATA_OMP(W[i]);
      /* per-thread partial sum over this thread's chunk of j */
      sum = ZERO;
      #pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        if (idd[j] > ZERO)
          sum += SUNSQR(xd[j] * wd[j]);
      }
      /* serialize the reduction into the shared accumulator nrm[i] */
      #pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finish: scale by 1/N (full length, not the masked count) and sqrt */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }

  return(0);
}
/* Computes, for every vector i in the array and every scale factor j:
 *   Z[j][i] = Y[j][i] + a[j] * X[i],  i = 0..nvec-1, j = 0..nsum-1.
 * Operates in place when Y == Z.  Returns 0 on success, -1 on invalid
 * counts or allocation failure, or the result of the delegated kernel. */
int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a,
                                       N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int          i, j;
  sunindextype k, N;
  realtype*    xd=NULL;
  realtype*    yd=NULL;
  realtype*    zd=NULL;
  int          retval;
  N_Vector*    YY;
  N_Vector*    ZZ;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    if ((YY == NULL) || (ZZ == NULL)) {
      /* FIX: allocations were previously unchecked and a NULL result
         would have been dereferenced below; free(NULL) is a no-op */
      free(YY);
      free(ZZ);
      return(-1);
    }

    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }

    retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ);

    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /*
   * Y[j][i] += a[j] * X[i]  (in place, Y aliases Z)
   */
  if (Y == Z) {
    #pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \
      num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
        for (j=0; j<nsum; j++) {
          yd = NV_DATA_OMP(Y[j][i]);
          #pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            yd[k] += a[j] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[j][i] = Y[j][i] + a[j] * X[i]
   */
  #pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      for (j=0; j<nsum; j++) {
        yd = NV_DATA_OMP(Y[j][i]);
        zd = NV_DATA_OMP(Z[j][i]);
        #pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] = a[j] * xd[k] + yd[k];
        }
      }
    }
  }

  return(0);
}
/* Computes a linear combination across vector arrays:
 *   Z[j] = sum_{i=0..nsum-1} c[i] * X[i][j],  j = 0..nvec-1.
 * Uses in-place kernels when Z aliases X[0] (with an extra shortcut when
 * additionally c[0] == 1).  Returns 0 on success, -1 on invalid counts or
 * allocation failure. */
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum,
                                           realtype* c,
                                           N_Vector** X,
                                           N_Vector* Z)
{
  int          i; /* vector arrays index in summation [0,nsum) */
  int          j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector          [0,N)    */
  sunindextype N;
  realtype*    zd=NULL;
  realtype*    xd=NULL;
  realtype*    ctmp;
  N_Vector*    Y;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMP(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    if (Y == NULL) return(-1); /* FIX: previously unchecked allocation */
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    if (ctmp == NULL) return(-1); /* FIX: previously unchecked allocation */
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
   * (Z aliases X[0] and c[0] == 1: pure accumulation into Z)
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
    #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
      num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
          #pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
   * (Z aliases X[0]: scale in place first, then accumulate)
   */
  if (X[0] == Z) {
    #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
      num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        #pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] *= c[0];
        }
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
          #pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
   * (general case: Z is distinct from X[0])
   */
  #pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
    num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd = NV_DATA_OMP(X[0][j]);
      zd = NV_DATA_OMP(Z[j]);
      #pragma omp for schedule(static)
      for (k=0; k<N; k++) {
        zd[k] = c[0] * xd[k];
      }
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd = NV_DATA_OMP(X[i][j]);
        #pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] += c[i] * xd[k];
        }
      }
    }
  }

  return(0);
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
/* z <- x : copy the components of x into z */
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;

  k = 0; /* initialize to suppress clang warning */
  src = dst = NULL;

  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  dst = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,src,dst) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    dst[k] = src[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
/* z <- x + y : component-wise vector sum */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = xv[k]+yv[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
/* z <- x - y : component-wise vector difference */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = xv[k]-yv[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
/* z <- -x : component-wise negation */
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;

  k = 0; /* initialize to suppress clang warning */
  src = dst = NULL;

  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  dst = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,src,dst) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    dst[k] = -src[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
/* z <- c*(x + y) : scaled vector sum */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,c,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = c*(xv[k]+yv[k]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
/* z <- c*(x - y) : scaled vector difference */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,c,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = c*(xv[k]-yv[k]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
/* z <- a*x + y */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,a,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = (a*xv[k])+yv[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
/* z <- a*x - y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xv, *yv, *zv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = zv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);
  zv  = NV_DATA_OMP(z);

  #pragma omp parallel for default(none) private(k) shared(len,a,xv,yv,zv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    zv[k] = (a*xv[k])-yv[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
/* y <- a*x + y, with dedicated loops for the common cases a == 1 and a == -1 */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, len;
  realtype *xv, *yv;

  k = 0; /* initialize to suppress clang warning */
  xv = yv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);
  yv  = NV_DATA_OMP(y);

  if (a == ONE) {
    /* y <- x + y */
    #pragma omp parallel for default(none) private(k) shared(len,xv,yv) schedule(static) \
      num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++)
      yv[k] += xv[k];
    return;
  }

  if (a == -ONE) {
    /* y <- y - x */
    #pragma omp parallel for default(none) private(k) shared(len,xv,yv) schedule(static) \
      num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++)
      yv[k] -= xv[k];
    return;
  }

  /* general coefficient */
  #pragma omp parallel for default(none) private(k) shared(len,a,xv,yv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    yv[k] += a*xv[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
/* x <- a*x : in-place scaling */
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
  sunindextype k, len;
  realtype *xv;

  k = 0; /* initialize to suppress clang warning */
  xv = NULL;

  len = NV_LENGTH_OMP(x);
  xv  = NV_DATA_OMP(x);

  #pragma omp parallel for default(none) private(k) shared(len,a,xv) schedule(static) \
    num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    xv[k] *= a;

  return;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* Z[v] = X[v] + Y[v] for v = 0..nvec-1; always returns 0 */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = xv[k] + yv[k];
      }
    }
  }

  return(0);
}
/* Z[v] = X[v] - Y[v] for v = 0..nvec-1; always returns 0 */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = xv[k] - yv[k];
      }
    }
  }

  return(0);
}
/* Z[v] = c * (X[v] + Y[v]) for v = 0..nvec-1; always returns 0 */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,c) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = c * (xv[k] + yv[k]);
      }
    }
  }

  return(0);
}
/* Z[v] = c * (X[v] - Y[v]) for v = 0..nvec-1; always returns 0 */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,c) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = c * (xv[k] - yv[k]);
      }
    }
  }

  return(0);
}
/* Z[v] = a*X[v] + Y[v] for v = 0..nvec-1; always returns 0 */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,a) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = (a * xv[k]) + yv[k];
      }
    }
  }

  return(0);
}
/* Z[v] = a*X[v] - Y[v] for v = 0..nvec-1; always returns 0 */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;
  realtype*    zv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  #pragma omp parallel default(none) private(v,k,xv,yv,zv) shared(nvec,X,Y,Z,len,a) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      zv = NV_DATA_OMP(Z[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        zv[k] = (a * xv[k]) - yv[k];
      }
    }
  }

  return(0);
}
/* Y[v] <- a*X[v] + Y[v] for v = 0..nvec-1, with dedicated loops for the
 * common cases a == 1 and a == -1; always returns 0 */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int          v;
  sunindextype k, len;
  realtype*    xv=NULL;
  realtype*    yv=NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  len = NV_LENGTH_OMP(X[0]);

  if (a == ONE) {
    /* Y[v] <- X[v] + Y[v] */
    #pragma omp parallel default(none) private(v,k,xv,yv) shared(nvec,X,Y,len,a) \
      num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (v=0; v<nvec; v++) {
        xv = NV_DATA_OMP(X[v]);
        yv = NV_DATA_OMP(Y[v]);
        #pragma omp for schedule(static)
        for (k=0; k<len; k++) {
          yv[k] += xv[k];
        }
      }
    }
    return(0);
  }

  if (a == -ONE) {
    /* Y[v] <- Y[v] - X[v] */
    #pragma omp parallel default(none) private(v,k,xv,yv) shared(nvec,X,Y,len,a) \
      num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (v=0; v<nvec; v++) {
        xv = NV_DATA_OMP(X[v]);
        yv = NV_DATA_OMP(Y[v]);
        #pragma omp for schedule(static)
        for (k=0; k<len; k++) {
          yv[k] -= xv[k];
        }
      }
    }
    return(0);
  }

  /* general coefficient */
  #pragma omp parallel default(none) private(v,k,xv,yv) shared(nvec,X,Y,len,a) \
    num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v=0; v<nvec; v++) {
      xv = NV_DATA_OMP(X[v]);
      yv = NV_DATA_OMP(Y[v]);
      #pragma omp for schedule(static)
      for (k=0; k<len; k++) {
        yv[k] += a * xv[k];
      }
    }
  }

  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enables (tf nonzero) or disables (tf zero) all fused and vector-array
 * operations on vector v by installing or clearing the corresponding ops
 * table entries.  Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf)
{
  /* vector and its ops table must both be non-NULL */
  if (v == NULL || v->ops == NULL) return(-1);

  /* fused vector operations */
  v->ops->nvlinearcombination            = tf ? N_VLinearCombination_OpenMP : NULL;
  v->ops->nvscaleaddmulti                = tf ? N_VScaleAddMulti_OpenMP : NULL;
  v->ops->nvdotprodmulti                 = tf ? N_VDotProdMulti_OpenMP : NULL;

  /* vector array operations */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMP : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMP : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMP : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  return(0);
}
/* Enables or disables the fused linear-combination operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;
  return(0);
}
/* Enables or disables the fused scale-add-multi operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMP : NULL;
  return(0);
}
/* Enables or disables the fused multi dot-product operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMP : NULL;
  return(0);
}
/* Enables or disables the linear-sum vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the scale vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the const vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the WRMS-norm vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the masked WRMS-norm vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the scale-add-multi vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the linear-combination vector-array operation on v.
 * Returns 0 on success, -1 if v or v->ops is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;
  return(0);
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
/*
  Matrix cache descriptor: a 2-D array of fixed-size elements that may be
  backed by heap memory, an anonymous mapping, or a disk file.
*/
struct _MatrixInfo
{
  CacheType
    type;                     /* cache backing kind (e.g. MemoryCache) */

  size_t
    columns,                  /* matrix width, in elements */
    rows,                     /* matrix height, in elements */
    stride;                   /* size of one element, in bytes */

  MagickSizeType
    length;                   /* total storage size: columns*rows*stride bytes */

  MagickBooleanType
    mapped,                   /* MagickTrue when elements came from MapBlob() */
    synchronize;              /* honor MAGICK_SYNCHRONIZE env var (flush/fallocate) */

  char
    path[MagickPathExtent];   /* path of the disk cache file, when disk-backed */

  int
    file;                     /* disk cache file descriptor, or -1 when unused */

  void
    *elements;                /* element storage when memory- or map-backed */

  SemaphoreInfo
    *semaphore;               /* serializes lseek+write pairs when pwrite() is absent */

  size_t
    signature;                /* MagickCoreSignature sanity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS handler installed by SetMatrixExtent(): a bus error while touching
   matrix cache storage means the backing file could not be extended (for
   example, the disk filled up), which is unrecoverable here. */
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Writes 'length' bytes from 'buffer' to the matrix cache file starting at
  byte 'offset'.  Returns the number of bytes actually written (which may
  be less than 'length' if a non-EINTR error stops the loop early), or -1
  when the initial seek fails.  Interrupted (EINTR) writes are retried.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite(), the lseek()+write() pair must be serialized so
     concurrent writers do not interleave their file positions */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* write in chunks no larger than SSIZE_MAX */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* retry only writes interrupted by a signal */
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Extends the matrix cache file to at least 'length' bytes.  Returns
  MagickTrue when the file already covers 'length' or was successfully
  extended (by writing a single byte at offset length-1); MagickFalse when
  'length' overflows MagickOffsetType or an I/O operation fails.
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not round-trip through MagickOffsetType */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);  /* file is already large enough */
  /* extend by writing one byte at the new end of file */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  /* a later SIGBUS while touching the extended region is treated as fatal */
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  AcquireMatrixInfo() allocates a columns x rows matrix of stride-byte
  elements.  Backing storage is chosen in order of preference as resource
  limits permit: heap memory, an anonymous memory map, a memory-mapped
  temporary file, or plain disk I/O on a temporary file.  Returns NULL on
  failure (an exception is raised for resource exhaustion).
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Guard against zero dimensions (the round-trip check below divides by
    rows and stride) and against overflow of columns*rows*stride.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if ((rows == 0) || (stride == 0) ||
      (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          /* Prefer the heap; fall back to an anonymous memory map. */
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      /*
        Memory path failed or was disallowed: back the matrix with a unique
        temporary file, memory-mapped when the map resource permits.
      */
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix and vectors required
% for the GaussJordanElimination() method below, for solving a system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **rows;

  ssize_t
    r;

  /*
    Allocate the array of row pointers, then each zero-filled row.  On any
    allocation failure, unwind everything allocated so far and return NULL.
  */
  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    ssize_t
      c;

    rows[r]=(double *) AcquireQuantumMemory(size,sizeof(**rows));
    if (rows[r] == (double *) NULL)
      {
        while (--r >= 0)
          rows[r]=(double *) RelinquishMagickMemory(rows[r]);
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      rows[r][c]=0.0;
  }
  return(rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  DestroyMatrixInfo() releases all resources held by the matrix — heap
  memory, anonymous or file-backed maps, the temporary disk file, and the
  corresponding resource-limit accounting — then frees the MatrixInfo
  struct itself.  Always returns NULL.
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
/* Anonymous memory map rather than heap memory. */
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
/* fallthrough: a map cache is also disk-backed, so the DiskCache case
   below must also run to close and remove the temporary file and to
   release the disk resource. */
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficients, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  GaussJordanElimination() performs Gauss-Jordan elimination with full
  pivoting: it reduces `matrix` (rank x rank, an array of row pointers) to
  the identity while applying the same row operations to `vectors`
  (number_vectors column arrays of length rank), thereby solving the
  augmented system in place.  Returns MagickFalse on a memory failure or a
  singular matrix; the work arrays are released on every exit path (the
  early singularity returns previously leaked them).
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivot: locate the largest element in the unreduced sub-matrix.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /* Duplicate pivot: the matrix is singular. */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* Swap rows so the pivot lies on the diagonal. */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /* Singularity: release the work arrays before failing. */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /* Scale the pivot row so the pivot element becomes 1. */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    /* Eliminate the pivot column from every other row. */
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /* Undo the column swaps performed during pivoting. */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  GetMatrixColumns() returns the number of columns in the matrix.
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/*
  Clamp an x offset into the valid column range [0, columns-1]
  (replicate-edge policy).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x < (ssize_t) columns)
    return(x);
  return((ssize_t) (columns-1));
}
/*
  Clamp a y offset into the valid row range [0, rows-1]
  (replicate-edge policy).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y < (ssize_t) rows)
    return(y);
  return((ssize_t) (rows-1));
}
/*
  ReadMatrixElements() reads `length` bytes into `buffer` from the matrix's
  backing file starting at byte `offset`.  Returns the number of bytes
  actually read, or -1 if the initial seek fails.  Short reads are retried;
  EINTR is retried, any other read error (or EOF) terminates the loop
  early (callers detect failure by comparing the return value against the
  requested length).
*/
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
/* No pread(): serialize the seek+read pair on the shared descriptor. */
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
/* Each chunk is capped at SSIZE_MAX, the largest portable read size. */
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/*
  GetMatrixElement() copies the stride bytes of the element at (x,y) into
  `value`.  Out-of-range offsets are clamped to the nearest edge (replicate
  policy), so the element index is always valid.  Returns MagickFalse only
  on a short read from a disk-backed cache.
  NOTE(review): `value` must point to at least matrix_info->stride bytes —
  verify against callers (MatrixToImage passes a double).
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
/* Linear element index with edge-clamped coordinates. */
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
/* Memory- or map-backed cache: direct copy. */
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  GetMatrixRows() returns the number of rows in the matrix.
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one equation's contribution for least-squares fitting:
  matrix += outer product of terms with itself, and each result vector
  gets results[v]*terms added.  Each element receives exactly one update
  per call, so the accumulation order does not affect the result.
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  ssize_t
    u,
    v;

  for (u=0; u < (ssize_t) rank; u++)
    for (v=0; v < (ssize_t) rank; v++)
      matrix[u][v]+=terms[u]*terms[v];
  for (u=0; u < (ssize_t) number_vectors; u++)
    for (v=0; v < (ssize_t) rank; v++)
      vectors[u][v]+=results[u]*terms[v];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MatrixToImage() renders the matrix as a grayscale image, linearly scaling
  the element range [min,max] onto the quantum range.  The matrix elements
  must be doubles (stride >= sizeof(double)).  Returns NULL on failure.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&min_value);
  max_value=min_value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      double
        value;

      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Choose the scale factor; all-zero and constant matrices are
    special-cased to avoid dividing by zero.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.  Check the allocation before dereferencing:
    the original code wrote image->columns through a possibly-NULL pointer.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    Quantum
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  NullMatrix() sets all elements of the matrix to zero.  Returns MagickTrue
  on success.  Memory- and map-backed caches are cleared with memset();
  disk-backed caches are zeroed by writing through the file descriptor.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    row_length,
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  /*
    Write exactly columns*stride bytes per row so the total written equals
    the matrix length; the previous code wrote `length` bytes per row,
    zeroing the cache rows-times over and growing the file past its extent.
  */
  row_length=(ssize_t) (matrix_info->columns*matrix_info->stride);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < row_length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < row_length)
      break;
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    r;

  /*
    Free each row, then the row-pointer array itself.  NULL-safe: a NULL
    matrix is returned unchanged.
  */
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=(ssize_t) number_rows-1; r >= 0; r--)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
/*
  SetMatrixElement() copies stride bytes from `value` into the element at
  (x,y).  Unlike GetMatrixElement(), out-of-range offsets are rejected
  (MagickFalse) rather than clamped to the edge.  Returns MagickFalse on a
  bad offset or a short write to a disk-backed cache.
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
/* Linear element index; bounds-checked against the matrix extent. */
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
/* Memory- or map-backed cache: direct copy. */
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
|
GB_subassign_10_and_18.c | //------------------------------------------------------------------------------
// GB_subassign_10_and_18: C(I,J)<M or !M,repl> = A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 10: C(I,J)<M,repl> = A ; using S
// Method 18: C(I,J)<!M,repl> = A ; using S
// M: present
// Mask_comp: true or false
// C_replace: true
// accum: NULL
// A: matrix
// S: constructed
// C: not bitmap: use GB_bitmap_assign instead
// M, A: any sparsity structure.
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_10_and_18
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct, // if true, use the only structure of M
const bool Mask_comp, // if true, !M, else use M
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_MASK ;
GB_GET_A ;
GB_GET_S ;
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 10: C(I,J)<M,repl> = A ; using S
// Method 18: C(I,J)<!M,repl> = A ; using S
//--------------------------------------------------------------------------
// Time: Optimal. Omega (nnz(A)+nnz(S)), since all entries in S+A must be
// traversed, and the corresponding entry in M (even if not present)
// determines the action to take. M can add a log(m) factor if sparse.
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// S (i,j) is present but A (i,j) is not
// ----[C . 1] or [X . 1]-------------------------------
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still zombie
// ----[C . 0] or [X . 0]-------------------------------
// [X . 0]: action: ( X ): still a zombie
// [C . 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
else if (Sfound && Afound)
{
// both S (i,j) and A (i,j) present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
GB_C_S_LOOKUP ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): A to C no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_matrix ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): now zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
// ----[C . 1] or [X . 1]-------------------------------
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still zombie
// ----[C . 0] or [X . 0]-------------------------------
// [X . 0]: action: ( X ): still a zombie
// [C . 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
GB_C_S_LOOKUP ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): A to C no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_matrix ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): now zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list S (:,j) has entries. List A (:,j) exhausted.
while (pS < pS_end)
{
// ----[C . 1] or [X . 1]-----------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (A) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
random.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*************************************************************
Copyright (c) 2013 The University of Tennessee. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
- Redistributions of source code must retain the
above copyright notice, this list of conditions and
the following disclaimer.
- Redistributions in binary form must reproduce the
above copyright notice, this list of conditions and
the following disclaimer listed in this license in the
documentation and/or other materials provided with the
distribution.
- Neither the name of the copyright holders nor the names
of its contributors may be used to endorse or promote
products derived from this software without specific
prior written permission.
This software is provided by the copyright holders and
contributors "as is" and any express or implied warranties,
including, but not limited to, the implied warranties of
merchantability and fitness for a particular purpose are
disclaimed. in no event shall the copyright owner or
contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but
not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption)
however caused and on any theory of liability, whether in
contract, strict liability, or tort (including negligence or
otherwise) arising in any way out of the use of this software,
even if advised of the possibility of such damage.
*************************************************************/
/*******************************************************************
NAME: RandomAccess
PURPOSE: This program tests the efficiency of the memory subsystem to
update elements of an array with irregular stride.
USAGE: The program takes as input the number of threads involved, the 2log
of the size of the table that gets updated, the ratio of table size
over number of updates, and the vector length of simultaneously
updatable table elements. When multiple threads participate, they
all share the same table. This can lead to conflicts in memory
accesses. Setting the ATOMICFLAG variable in the Makefile will
avoid conflicts, but at a large price, because the atomic
directive is currently not implemented on IA for the type of update
operation used here. Instead, a critical section is used, serializing
the main loop.
If the CHUNKFLAG variable is set, contiguous, non-overlapping chunks
of the array are assigned to individual threads. Each thread computes
all pseudo-random indices into the table, but only updates table
elements that fall inside its chunk. Hence, this version is safe, and
there is no false sharing. It is also non-scalable.
<progname> <# threads> <log2 tablesize> <#update ratio> <vector length>
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
PRK_starts()
poweroftwo()
NOTES: This program is derived from HPC Challenge Random Access. The random
number generator computes successive powers of 0x2, modulo the
primitive polynomial x^63+x^2+x+1. The principal differences between
this code and the HPCC version are:
- we start the stream of random numbers not with seed 0x1, but the
SEQSEED-th element in the stream of powers of 0x2.
- the timed code applies the RandomAccess operator twice to the table of
computed resuls (starting with the same seed(s) for "ran" in both
iterations. The second pass makes sure that any update to any table
element that sets high-order bits in the first pass resets those bits
to zero.
- the verification test now simply constitutes checking whether table
element j equals j.
- the number of independent streams (vector length) can be changed by the
user.
We note that the vectorized version of this code (i.e. nstarts unequal
to 1), does not feature exactly the same sequence of accesses and
intermediate update values in the table as the scalar version. The
reason for this is twofold.
1. the elements in the stream of powers of 0x2 that get computed outside
the main timed loop as seeds for independent streams in the vectorized
version, using the jump-ahead function PRK_starts, are computed inside
the timed loop for the scalar version. However, since both versions do
the same number of timed random accesses, the vectorized version must
progress further in the sequence of powers of 0x2.
2. The independent streams of powers of 0x2 computed in the vectorized
version can (and will) cause updates of the same elements of the table
in an order that is not consistent with the scalar version. That is,
distinct values of "ran" can refer to the same table element
"ran&(tablesize-1)," but the operation
Table[ran&(tablesize-1)] ^= ran will deposit different values in that
table element for different values of ran. At the end of each pass over
the data set, the table will contain the same values in the vector and
scalar version (ignoring the small differences caused by 1.) because of
commutativity of the XOR operator. If the update operator had been
non-commutative, the vector and scalar version would have yielded
different results.
HISTORY: Written by Rob Van der Wijngaart, June 2006.
Histogram code (verbose mode) courtesy Roger Golliver
Shared table version derived from random.c by Michael Frumkin, October 2006
************************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* Define constants */
/* PERIOD = (2^63-1)/7 = 7*73*127*337*92737*649657 */
#if LONG_IS_64BITS
#define POLY 0x0000000000000007UL
#define PERIOD 1317624576693539401L
/* sequence number in stream of random numbers to be used as initial value */
#define SEQSEED 834568137686317453L
#else
#define POLY 0x0000000000000007ULL
#define PERIOD 1317624576693539401LL
/* sequence number in stream of random numbers to be used as initial value */
#define SEQSEED 834568137686317453LL
#endif
#if HPCC
#undef ATOMIC
#undef CHUNKED
#undef ERRORPERCENT
#define ERRORPERCENT 1
#else
#if CHUNKED
#undef ATOMIC
#endif
#endif
static u64Int PRK_starts(s64Int);
#if UNUSED
static int poweroftwo(int);
#endif
/*********************************************************************
** main: driver for the OpenMP RandomAccess (GUPS) benchmark.
** Parses <# threads> <log2 tablesize> <#update ratio> <vector length>,
** builds the shared table, runs two identical update rounds (so the
** table returns to its initial state), and verifies Table[j] == j.
** Exits the process with EXIT_SUCCESS/EXIT_FAILURE; never returns.
*********************************************************************/
int main(int argc, char **argv) {
  int my_ID;               /* thread ID */
  int update_ratio;        /* multiplier of tablesize for # updates */
  int nstarts;             /* vector length */
  s64Int i, j, round, oldsize; /* dummies */
  s64Int error;            /* number of incorrect table elements */
  s64Int tablesize;        /* aggregate table size (all threads) */
  s64Int nupdate;          /* total number of updates to the table */
  size_t tablespace;       /* bytes required for table */
  u64Int *ran;             /* vector of random numbers */
  s64Int index;            /* index into Table */
#if VERBOSE
  u64Int * RESTRICT Hist;  /* histogram of # updates to table elements */
  unsigned int *HistHist;  /* histogram of update frequencies */
#endif
  u64Int * RESTRICT Table; /* (pseudo-)randomly accessed array */
  double random_time;      /* timed interval */
  int nthread_input;       /* thread parameters */
  int nthread;
  int log2tablesize;       /* log2 of aggregate table size */
  int num_error=0;         /* flag that signals that requested and obtained
                              numbers of threads are the same */

  printf("Parallel Research Kernels version %s\n", PRKVERSION);
  printf("OpenMP Random Access test\n");

#if LONG_IS_64BITS
  if (sizeof(long) != 8) {
    /* FIX: sizeof yields size_t; passing it to %d is undefined behavior.
       Cast to int (value is tiny) so argument and format agree. */
    printf("ERROR: Makefile says \"long\" is 8 bytes, but it is %d bytes\n",
           (int) sizeof(long));
    exit(EXIT_FAILURE);
  }
#endif

  /*********************************************************************
  ** process and test input parameters
  *********************************************************************/
  if (argc != 5){
    printf("Usage: %s <# threads> <log2 tablesize> <#update ratio> ", *argv);
    printf("<vector length>\n");
    exit(EXIT_FAILURE);
  }

  nthread_input = atoi(*++argv);
  /* test whether number of threads is positive */
  if (nthread_input <1) {
    printf("ERROR: Invalid number of threads: %d, must be positive\n",
           nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  log2tablesize = atoi(*++argv);
  if (log2tablesize < 1){
    printf("ERROR: Log2 tablesize is %d; must be >= 1\n",log2tablesize);
    exit(EXIT_FAILURE);
  }

  update_ratio = atoi(*++argv);
  /* test whether update ratio is positive */
  if (update_ratio <1) {
    printf("ERROR: Invalid update ratio: %d, must be positive\n",
           update_ratio);
    exit(EXIT_FAILURE);
  }

  nstarts = atoi(*++argv);
  /* test whether vector length is positive */
  if (nstarts <1) {
    printf("ERROR: Invalid vector length: %d, must be positive\n",
           nstarts);
    exit(EXIT_FAILURE);
  }

  /* do some additional divisibility tests */
  if (nstarts%nthread_input) {
    printf("ERROR: vector length %d must be divisible by # threads %d\n",
           nstarts, nthread_input);
    exit(EXIT_FAILURE);
  }
  if (update_ratio%nstarts) {
    printf("ERROR: update ratio %d must be divisible by vector length %d\n",
           update_ratio, nstarts);
    exit(EXIT_FAILURE);
  }

  /* compute table size carefully to make sure it can be represented */
  tablesize = 1;
  for (i=0; i<log2tablesize; i++) {
    oldsize = tablesize;
    tablesize <<=1;
    /* shifting out the top bit would make tablesize/2 != oldsize */
    if (tablesize/2 != oldsize) {
      printf("Requested table size too large; reduce log2 tablesize = %d\n",
             log2tablesize);
      exit(EXIT_FAILURE);
    }
  }

  /* even though the table size can be represented, computing the space
     required for the table may lead to overflow */
  tablespace = (size_t) tablesize*sizeof(u64Int);
  if ((tablespace/sizeof(u64Int)) != tablesize || tablespace <=0) {
    printf("Cannot represent space for table on this system; ");
    printf("reduce log2 tablesize\n");
    exit(EXIT_FAILURE);
  }

#if VERBOSE
  Hist = (u64Int *) prk_malloc(tablespace);
  HistHist = (unsigned int *) prk_malloc(tablespace);
  if (!Hist || ! HistHist) {
    printf("ERROR: Could not allocate space for histograms\n");
    exit(EXIT_FAILURE);
  }
#endif

  /* compute number of updates carefully to make sure it can be represented */
  nupdate = update_ratio * tablesize;
  if (nupdate/tablesize != update_ratio) {
    printf("Requested number of updates too large; ");
    printf("reduce log2 tablesize or update ratio\n");
    exit(EXIT_FAILURE);
  }

  Table = (u64Int *) prk_malloc(tablespace);
  if (!Table) {
    printf("ERROR: Could not allocate space of "FSTR64U" bytes for table\n",
           (u64Int) tablespace);
    exit(EXIT_FAILURE);
  }

  error = 0;
  /* NOTE(review): the reduction(+:error) clause is a harmless leftover --
     error is only incremented in the serial verification loop below. */
#pragma omp parallel private(i, j, ran, round, index, my_ID) reduction(+:error)
  {
  int my_starts;

  my_ID = omp_get_thread_num();
#pragma omp master
  {
  nthread = omp_get_num_threads();
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = "FSTR64U"\n", (u64Int) nthread_input);
    printf("Table size (shared) = "FSTR64U"\n", tablesize);
    printf("Update ratio = "FSTR64U"\n", (u64Int) update_ratio);
    printf("Number of updates = "FSTR64U"\n", nupdate);
    printf("Vector length = "FSTR64U"\n", (u64Int) nstarts);
    printf("Percent errors allowed = "FSTR64U"\n", (u64Int) ERRORPERCENT);
#if RESTRICT_KEYWORD
    printf("No aliasing = on\n");
#else
    printf("No aliasing = off\n");
#endif
#if defined(ATOMIC) && !defined(CHUNKED)
    printf("Shared table, atomic updates\n");
#elif defined(CHUNKED)
    printf("Shared, chunked table\n");
#else
    printf("Shared table, non-atomic updates\n");
#endif
  }
  }
  bail_out(num_error);

#if CHUNKED
  /* compute upper and lower table bounds for this thread */
  u64Int low = my_ID *(tablesize/nthread);
  u64Int up = (my_ID+1)*(tablesize/nthread);
  my_starts = nstarts;
#else
  my_starts = nstarts/nthread;
#endif

  ran = (u64Int *) prk_malloc(my_starts*sizeof(u64Int));
  if (!ran) {
    printf("ERROR: Thread %d Could not allocate %d bytes for random numbers\n",
           my_ID, my_starts*(int)sizeof(u64Int));
    num_error = 1;
  }
  bail_out(num_error);

  /* initialize the table */
#pragma omp for
  for(i=0;i<tablesize;i++) Table[i] = (u64Int) i;

#pragma omp barrier
#pragma omp master
  {
  random_time = wtime();
  }

  /* ran is privatized. Must make sure for non-chunked version that
     we pick the right section of the originally shared ran array */
#if CHUNKED
  int offset = 0;
#else
  int offset = my_ID*my_starts;
#endif

  /* do two identical rounds of Random Access to make sure we recover
     the initial condition */
  for (round=0; round <2; round++) {
    for (j=0; j<my_starts; j++) {
      ran[j] = PRK_starts(SEQSEED+(nupdate/nstarts)*(j+offset));
    }
    for (j=0; j<my_starts; j++) {
      /* because we do two rounds, we divide nupdates in two */
      for (i=0; i<nupdate/(nstarts*2); i++) {
        /* advance the LFSR: shift and conditionally fold in POLY */
        ran[j] = (ran[j] << 1) ^ ((s64Int)ran[j] < 0? POLY: 0);
        index = ran[j] & (tablesize-1);
        /* without ATOMIC/CHUNKED the XOR below races on purpose; the
           benchmark tolerates up to ERRORPERCENT lost updates */
#if defined(ATOMIC)
#pragma omp atomic
#elif defined(CHUNKED)
        if (index >= low && index < up) {
#endif
        Table[index] ^= ran[j];
#if VERBOSE
#pragma omp atomic
        Hist[index] += 1;
#endif
#if CHUNKED
        }
#endif
      }
    }
  }

#pragma omp master
  {
  random_time = wtime() - random_time;
  }
  } /* end of OpenMP parallel region */

  /* verification test: two XOR passes must restore Table[i] == i */
  for(i=0;i<tablesize;i++) {
    if(Table[i] != (u64Int) i) {
#if VERBOSE
      printf("Error Table["FSTR64U"]="FSTR64U"\n",i,Table[i]);
#endif
      error++;
    }
  }

  if ((error && (ERRORPERCENT==0)) ||
      ((double)error/(double)tablesize > ((double) ERRORPERCENT)*0.01)) {
    printf("ERROR: number of incorrect table elements = "FSTR64U"\n", error);
    exit(EXIT_FAILURE);
  }
  else {
    printf("Solution validates, number of errors: %ld\n",(long) error);
    printf("Rate (GUPs/s): %lf, time (s) = %lf\n",
           1.e-9*nupdate/random_time,random_time);
  }

#if VERBOSE
  /* NOTE(review): assumes every Hist[i] fits inside the HistHist
     allocation (tablespace bytes = 2*tablesize unsigned ints) -- confirm */
  for(i=0;i<tablesize;i++) HistHist[Hist[i]]+=1;
  for(i=0;i<=tablesize;i++) if (HistHist[i] != 0)
    printf("HistHist[%4.1d]=%9.1d\n",(int)i,HistHist[i]);
#endif

  exit(EXIT_SUCCESS);
}
/* Utility routine to start random number generator at nth step */
/* Jump the benchmark's 63-bit LFSR stream ahead to its n-th element
   without generating the intervening values.  Builds a table of repeated
   squarings of the generator over GF(2), then walks the bits of n with a
   square-and-multiply scheme.  Returns the n-th power of 0x2 modulo the
   primitive polynomial POLY (0x1 for n == 0). */
u64Int PRK_starts(s64Int n)
{
  u64Int sq[64];          /* squaring table over GF(2) */
  u64Int acc, seed;
  int bit, b;

  /* bring n into the range [0, PERIOD] */
  while (n < 0)      n += PERIOD;
  while (n > PERIOD) n -= PERIOD;
  if (n == 0) return 0x1;

  /* fill the table: each entry is the previous one squared (two steps) */
  acc = 0x1;
  for (bit = 0; bit < 64; bit++) {
    sq[bit] = acc;
    acc = (acc << 1) ^ ((s64Int) acc < 0 ? POLY : 0);
    acc = (acc << 1) ^ ((s64Int) acc < 0 ? POLY : 0);
  }

  /* find the most significant set bit of n (n is nonzero here) */
  for (bit = 62; bit >= 0; bit--)
    if ((n >> bit) & 1) break;

  /* square-and-multiply down through the remaining bits of n */
  seed = 0x2;
  while (bit > 0) {
    acc = 0;
    for (b = 0; b < 64; b++)
      if ((unsigned int)((seed >> b) & 1)) acc ^= sq[b];
    seed = acc;
    bit--;
    if ((n >> bit) & 1)
      seed = (seed << 1) ^ ((s64Int) seed < 0 ? POLY : 0);
  }
  return seed;
}
#if UNUSED
/* utility routine that tests whether an integer is a power of two */
/* Return log2(n) when n is an exact power of two, and -1 otherwise
   (including n <= 0, except n == 1 which yields 0). */
int poweroftwo(int n) {
  int e;
  for (e = 0; (1 << e) < n; e++) ;
  return ((1 << e) == n) ? e : -1;
}
#endif
|
DRACC_OMP_041_Wrong_ordered_clause_Intra_yes.c | /*
Data race on the elements of countervar: each iteration reads the element written by the previous iteration, so concurrent execution yields varying results. Adding an ordered clause to the loop construct would make the execution sequentially consistent.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define N 42000
int countervar[N];
/* Reset every element of the shared counter array to zero.
   Always returns 0 (kept for the benchmark harness). */
int init(){
    int idx;
    for (idx = N; idx-- > 0; ) {
        countervar[idx] = 0;
    }
    return 0;
}
/* Offloaded prefix-increment kernel: countervar[i] = countervar[i-1] + 1
   carries a dependence from iteration i-1 to i.  Running the loop with
   "distribute parallel for" and no ordered clause lets iterations execute
   concurrently, so reads of countervar[i-1] can observe stale values.
   This data race is INTENTIONAL -- the file is a "yes race" benchmark
   case (see header comment); do not serialize or "fix" the loop. */
int count(){
#pragma omp target map(tofrom:countervar[0:N]) device(0)
#pragma omp teams num_teams(1)
#pragma omp distribute parallel for
    for (int i=1; i<N; i++){
        countervar[i]=countervar[i-1]+1;
    }
    return 0;
}
/* Scan countervar and report whether any element deviates from its index,
   i.e. whether the racy kernel produced a visible corruption.
   Prints the verdict to stdout; always returns 0. */
int check(){
    bool mismatch = false;
    for (int idx = 0; idx < N; ++idx) {
        mismatch = mismatch || (countervar[idx] != idx);
    }
    printf("Memory Access Issue visible: %s\n", mismatch ? "true" : "false");
    return 0;
}
/* Driver: zero the counters, run the (intentionally racy) kernel,
   then report whether corruption was observed. */
int main(){
    init();   /* zero countervar */
    count();  /* racy increment chain on the device */
    check();  /* print the verdict */
    return EXIT_SUCCESS;
}
cilk_rpy_ewald_polyd.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "timer.h"
#include <sys/time.h>
#include <cilk/cilk.h>
#define M_PI 3.14159265358979323846
#define NTHREADS 240
inline void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12)
{
double a = 1.;
double xi2 = xi*xi;
double xi3 = xi2*xi;
double xi5 = xi3*xi2;
double xi7 = xi5*xi2;
double r2 = r*r;
double r4 = r2*r2;
double ri = 1./r;
double ri2 = ri*ri;
double ri3 = ri*ri2;
double erfc_xi_r = erfc(xi*r);
double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2);
*m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp;
*m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp;
}
inline void scalar_rpy_ewald_recip(double k, double xi, double *m2)
{
double a = 1.;
double a3 = 1.;
double k2 = k*k;
double xii2k2 = k2/(xi*xi);
*m2 = (1. + 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2);
}
// note: positions must be wrapped inside the box [0,L]
/* Assemble the (3*np) x (3*np) Rotne-Prager-Yamakawa Ewald mobility matrix
   for np polydisperse particles in a cubic periodic box.
     np  : number of particles
     a   : output matrix, row-major with row stride 3*np; the pair sums
           write one triangle only (the header comment below mentions that
           the transpose is added elsewhere, e.g. in MATLAB)
     pos : 3*np coordinates, pre-wrapped into [0,L] (see note above)
     L   : cubic box edge
     rad : np particle radii
     xi  : Ewald splitting parameter
     nr  : real-space image range, images in [-nr,nr]^3
     nk  : reciprocal-space range, wavevectors in [-nk,nk]^3
   Returns 0 unconditionally.
   NOTE(review): the opening brace after the parameter list was missing in
   the extracted text and has been restored here. */
int rpy_ewald(int np, double * restrict a,
              const double * restrict pos, double L, const double * restrict rad, double xi, int nr, int nk)
{
    __declspec(align(64)) double rvec[8];
    __declspec(align(64)) double rvec0[8];
    __declspec(align(64)) double temp[8];
    // double temp_0, temp_1, temp_2, temp_3, temp_4, temp_5;
    double a3;
    double m11, m12, m2;
    double eye3_coef;
    double r2, r;
    int x, y, z;
    int i, j;
    double *ap0, *ap;
    int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2;
    /* NOTE(review): VSIZE hard-codes nk = 6; the stack arrays below
       overflow if a caller passes nk > 6 -- confirm callers never do. */
#define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2
    // int A_VSIZE = ceil(VSIZE/8.0)*8;
    // int K_VSIZE = ceil(3*VSIZE/8.0)*8;
    // printf("check vsize=%d\n", A_VSIZE);
    __declspec(align(64)) double k_array[VSIZE];    /* |k| per grid point */
    __declspec(align(64)) double m2_array[VSIZE];   /* recip scalar per point */
    __declspec(align(64)) double kvec_array[3*VSIZE]; /* k components */
    int ind;
    __declspec(align(64)) double kvec[8];
    double k;
    double t;
    double vinv = 1./(L*L*L);   /* inverse box volume */
    double time0, time1;
    double time0_real, time1_real;
    double time0_recip, time1_recip;
    // INDICES for converting for loops
    int _b, _index, ib, ib2;
    // *************************************************************************
    // compute and save coefficients for reciprocal-space sum
    // Due to symmetry, only need half of the grid points
    ind = 0;
    _b = (2*nk+1);
    for (_index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated
        z = _index%(_b)-nk;// adjusting the indices
        x = (_index-_index%(_b*_b))/(_b*_b)-nk;
        y = (_index%(_b*_b)-_index%(_b))/_b-nk;
        k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z));
        scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]);
        kvec_array[3*ind ] = 2.*M_PI/L*x;
        kvec_array[3*ind+1] = 2.*M_PI/L*y;
        kvec_array[3*ind+2] = 2.*M_PI/L*z;
        ind++;
    }
    /* Real-space sum over all unordered pairs (i,j), i < j.  The single
       pair index _index1 is unflattened into (i,j) with the sqrt-based
       triangular-number inversion below. */
    // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 )
    cilk_for (int _index1 = np*(np-1)/2-1; _index1>=0; _index1--){
        int i, j, _b, _index, x, y, z;
        double *ap, *ap0, eye3_coef, r, r2, m11, m12, a3;
        __declspec(align(64)) double rvec[8];
        __declspec(align(64)) double rvec0[8];
        __declspec(align(64)) double temp[8];
        i = np-1-(int)((1+sqrt(8*_index1+1))/2);
        j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2;
        /* temp[0..5] accumulate the six unique entries of the symmetric
           3x3 pair block; eye3_coef accumulates the isotropic part */
        temp[0] = 0.;
        temp[1] = 0.; temp[3] = 0.;
        temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
        eye3_coef = 0.;
        rvec0[0] = pos[3*i] - pos[3*j];
        rvec0[1] = pos[3*i+1] - pos[3*j+1];
        rvec0[2] = pos[3*i+2] - pos[3*j+2];
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        _b = (2*nr+1);
        //shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi)
        // #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3)
        /* loop over all periodic images in [-nr,nr]^3 */
        for (_index =0 ;_index < _b*_b*_b; _index++){
            z =_index%(_b)-nr;// adjusting the indices
            x = (_index-_index%(_b*_b))/(_b*_b)-nr;
            y = (_index%(_b*_b)-_index%(_b))/_b-nr;
            rvec[0] = rvec0[0] + x*L;
            rvec[1] = rvec0[1] + y*L;
            rvec[2] = rvec0[2] + z*L;
            // compute norm
            r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
            r = sqrt(r2);
            /* normalize to the unit separation direction */
            rvec[0] /= r;
            rvec[1] /= r;
            rvec[2] /= r;
            scalar_rpy_ewald_real(r, xi, a3, &m11, &m12);
            eye3_coef += m11;
            temp[0] += m12 * rvec[0] * rvec[0];
            temp[1] += m12 * rvec[0] * rvec[1];
            temp[2] += m12 * rvec[0] * rvec[2];
            temp[3] += m12 * rvec[1] * rvec[1];
            temp[4] += m12 * rvec[1] * rvec[2];
            temp[5] += m12 * rvec[2] * rvec[2];
        }
        // add contribution to eye3 term
        temp[0] += eye3_coef;
        temp[3] += eye3_coef;
        temp[5] += eye3_coef;
        // sum into global matrix (only lower-triangular part)
        // Use matlab to add transpose
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ = temp[0];
        *ap++ = temp[1];
        *ap = temp[2];
        ap = ap0+np*3;
        *ap++ = temp[1];
        *ap++ = temp[3];
        *ap = temp[4];
        ap = ap0+np*3+np*3;
        *ap++ = temp[2];
        *ap++ = temp[4];
        *ap = temp[5];
    }
    // *************************************************************************
    // reciprocal-space sum
    // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3)
    /* NOTE(review): unlike the loop above, this cilk_for reuses the
       function-scope _index as its control variable instead of declaring
       one in the loop header -- confirm the Cilk Plus compiler in use
       accepts and privatizes this form.  It also covers i <= j pairs
       (np*(np+1)/2), i.e. includes the diagonal blocks. */
    cilk_for (_index = np*(np+1)/2-1; _index>=0; _index--){
        int i, j, ind;
        double *ap, *ap0, k, m2, t, a3;
        __declspec(align(64)) double temp[8];
        __declspec(align(64)) double rvec[8];
        __declspec(align(64)) double kvec[8];
        i = np-1-(int)((-1+sqrt(8*_index+1))/2);
        j = np-1-_index + (int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2;
        rvec[0] = pos[3*i+0] - pos[3*j];
        rvec[1] = pos[3*i+1] - pos[3*j+1];
        rvec[2] = pos[3*i+2] - pos[3*j+2];
        temp[0] = 0.;
        temp[1] = 0.; temp[3] = 0.;
        temp[2] = 0.; temp[4] = 0.; temp[5] = 0.;
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        /* accumulate over the precomputed half-grid of wavevectors; the
           factor 2 in t accounts for the omitted symmetric half */
        for (ind=0; ind<vsize; ind++)
        {
            k = k_array[ind];
            m2 = m2_array[ind];
            kvec[0] = kvec_array[3*ind ];
            kvec[1] = kvec_array[3*ind+1];
            kvec[2] = kvec_array[3*ind+2];
            t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.);
            kvec[0] /= k;
            kvec[1] /= k;
            kvec[2] /= k;
            temp[0] += t * (1. - kvec[0]*kvec[0]);
            temp[1] += t * - kvec[0]*kvec[1];
            temp[2] += t * - kvec[0]*kvec[2];
            temp[3] += t * (1. - kvec[1]*kvec[1]);
            temp[4] += t * - kvec[1]*kvec[2];
            temp[5] += t * (1. - kvec[2]*kvec[2]);
        }
        // sum into matrix
        // sum with existing values
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ += temp[0];
        *ap++ += temp[1];
        *ap += temp[2];
        ap = ap0+np*3;
        *ap++ += temp[1];
        *ap++ += temp[3];// diagonal element
        *ap += temp[4];
        ap = ap0+np*3+np*3;
        *ap++ += temp[2];
        *ap++ += temp[4];
        *ap += temp[5];// diagonal element
    }
    // *************************************************************************
    // self-part
    for (i=0; i<np; i++)// adding some term to diagonal
    {
        t = 1./rad[i] - (6. - 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI);
        t *= 0.5;
        for (j=0; j<3; j++)
        {
            ind = 3*i+j;
            a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition
        }
    }
    return 0;
}
|
pr38633.c | /* PR middle-end/38633 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Reduced regression test for PR middle-end/38633 (see header): a struct
   type declared locally inside an OpenMP parallel region.  Per the PR
   reference this construct previously broke the middle end -- TODO
   confirm details against the PR.  The test only needs to compile
   (dg-do compile above); keep the body exactly as-is. */
void
foo ()
{
#pragma omp parallel
  {
    /* Block-scope type and variable: the construct under test. */
    struct A { int i; } j;
    j.i = 6;
    j.i++;
  }
}
|
update_ops_matrix_dense_single.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
//void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
//void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim);
/* Apply an arbitrary dense 2x2 matrix gate to one qubit of a state vector
   of dimension dim, dispatching to a concrete kernel based on build flags:
   SIMD vs scalar (_USE_SIMD) and serial vs OpenMP-parallel (_OPENMP).
   Below 2^13 amplitudes the serial kernel is chosen because thread startup
   would dominate; the threshold 13 appears to be empirically tuned --
   TODO confirm.  matrix is row-major [m00, m01, m10, m11]. */
void single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    /* Alternative implementations kept for benchmarking/reference: */
    //single_qubit_dense_matrix_gate_old_single(target_qubit_index, matrix, state, dim);
    //single_qubit_dense_matrix_gate_old_parallel(target_qubit_index, matrix, state, dim);
    //single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
    //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
    //single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
    //single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim);
    //return;

#ifdef _USE_SIMD
#ifdef _OPENMP
    /* small states: serial SIMD; large states: threaded SIMD */
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
    }
    else {
        single_qubit_dense_matrix_gate_parallel_simd(target_qubit_index, matrix, state, dim);
    }
#else
    single_qubit_dense_matrix_gate_single_simd(target_qubit_index, matrix, state, dim);
#endif
#else
#ifdef _OPENMP
    /* same size-based dispatch, scalar kernels */
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
        single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
    }
    else {
        //single_qubit_dense_matrix_gate_parallel_unroll(target_qubit_index, matrix, state, dim);
        single_qubit_dense_matrix_gate_parallel(target_qubit_index, matrix, state, dim);
    }
#else
    //single_qubit_dense_matrix_gate_single_unroll(target_qubit_index, matrix, state, dim);
    single_qubit_dense_matrix_gate_single(target_qubit_index, matrix, state, dim);
#endif
#endif
}
/* Serial scalar kernel: apply the 2x2 matrix to every amplitude pair that
 * differs only in the target qubit. The pair index is expanded by splitting
 * the loop counter at the target bit and re-inserting a zero there. */
void single_qubit_dense_matrix_gate_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE pair_stride = 1ULL << target_qubit_index;
    const ITYPE low_bits = pair_stride - 1;
    const ITYPE high_bits = ~low_bits;
    const ITYPE num_pairs = dim / 2;
    ITYPE iter = 0;
    for (iter = 0; iter < num_pairs; ++iter) {
        /* insert a 0 at the target bit position to get the |...0...> index */
        const ITYPE idx0 = (iter & low_bits) | ((iter & high_bits) << 1);
        const ITYPE idx1 = idx0 | pair_stride; /* partner index with target bit set */
        const CTYPE amp0 = state[idx0];
        const CTYPE amp1 = state[idx1];
        state[idx0] = matrix[0] * amp0 + matrix[1] * amp1;
        state[idx1] = matrix[2] * amp0 + matrix[3] * amp1;
    }
}
#ifdef _OPENMP
/* OpenMP kernel: identical math to the serial version; iterations touch
 * disjoint amplitude pairs, so the loop parallelizes without synchronization. */
void single_qubit_dense_matrix_gate_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
    const ITYPE pair_stride = 1ULL << target_qubit_index;
    const ITYPE low_bits = pair_stride - 1;
    const ITYPE high_bits = ~low_bits;
    const ITYPE num_pairs = dim / 2;
    ITYPE iter = 0;
#pragma omp parallel for
    for (iter = 0; iter < num_pairs; ++iter) {
        /* expand the counter around the target bit (bit forced to 0) */
        const ITYPE idx0 = (iter & low_bits) | ((iter & high_bits) << 1);
        const ITYPE idx1 = idx0 | pair_stride;
        const CTYPE amp0 = state[idx0];
        const CTYPE amp1 = state[idx1];
        state[idx0] = matrix[0] * amp0 + matrix[1] * amp1;
        state[idx1] = matrix[2] * amp0 + matrix[3] * amp1;
    }
}
#endif
/* Serial kernel, 2-way unrolled: processes two adjacent amplitude pairs per
 * iteration. For target qubit 0 the pair members are adjacent in memory, so
 * the loop walks the state in simple steps of 2. Otherwise, since mask >= 2,
 * basis_0 is even and basis_0+1 shares the same high bits, so the four
 * amplitudes (basis_0, basis_0+1, basis_1, basis_1+1) form two valid pairs.
 * Assumes dim is a power of two with at least 2 qubits (loop_dim even) --
 * TODO confirm with callers. */
void single_qubit_dense_matrix_gate_single_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
if (target_qubit_index == 0) {
/* target bit 0: each pair is (state[basis], state[basis+1]) */
ITYPE basis = 0;
for (basis = 0; basis < dim; basis+=2) {
CTYPE val0a = state[basis];
CTYPE val1a = state[basis + 1];
CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
state[basis] = res0a;
state[basis + 1] = res1a;
}
}
else {
ITYPE state_index = 0;
for (state_index = 0; state_index < loop_dim; state_index += 2) {
/* insert a 0 at the target bit to build the |...0...> index */
ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
ITYPE basis_1 = basis_0 + mask;
/* a/b suffixes: the two unrolled pairs at offsets 0 and +1 */
CTYPE val0a = state[basis_0];
CTYPE val0b = state[basis_0 + 1];
CTYPE val1a = state[basis_1];
CTYPE val1b = state[basis_1 + 1];
CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
CTYPE res1b = val0b * matrix[2] + val1b * matrix[3];
CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
CTYPE res0b = val0b * matrix[0] + val1b * matrix[1];
state[basis_0] = res0a;
state[basis_0 + 1] = res0b;
state[basis_1] = res1a;
state[basis_1 + 1] = res1b;
}
}
}
#ifdef _OPENMP
/* OpenMP variant of the 2-way unrolled kernel. Same index math as the serial
 * unrolled version; iterations write disjoint amplitude quadruples, so no
 * synchronization is needed. Assumes loop_dim is even (>= 2 qubits) --
 * TODO confirm with callers. */
void single_qubit_dense_matrix_gate_parallel_unroll(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
if (target_qubit_index == 0) {
/* target bit 0: pair members are adjacent in memory */
ITYPE basis = 0;
#pragma omp parallel for
for (basis = 0; basis < dim; basis += 2) {
CTYPE val0a = state[basis];
CTYPE val1a = state[basis + 1];
CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
state[basis] = res0a;
state[basis + 1] = res1a;
}
}
else {
ITYPE state_index = 0;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; state_index += 2) {
/* insert a 0 at the target bit to build the |...0...> index */
ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
ITYPE basis_1 = basis_0 + mask;
/* a/b suffixes: the two unrolled pairs at offsets 0 and +1 */
CTYPE val0a = state[basis_0];
CTYPE val0b = state[basis_0 + 1];
CTYPE val1a = state[basis_1];
CTYPE val1b = state[basis_1 + 1];
CTYPE res0a = val0a * matrix[0] + val1a * matrix[1];
CTYPE res1b = val0b * matrix[2] + val1b * matrix[3];
CTYPE res1a = val0a * matrix[2] + val1a * matrix[3];
CTYPE res0b = val0b * matrix[0] + val1b * matrix[1];
state[basis_0] = res0a;
state[basis_0 + 1] = res0b;
state[basis_1] = res1a;
state[basis_1 + 1] = res1b;
}
}
}
#endif
#ifdef _USE_SIMD
/* AVX2 kernel: each 256-bit register holds two double-complex amplitudes.
 * Complex multiplication is built from _mm256_mul_pd + _mm256_hadd_pd:
 * the mvX0 vectors hold (re, -im) pairs of a matrix entry (producing the
 * real part of each product) and mvX1 hold (im, re) pairs (producing the
 * imaginary part). Uses _mm256_permute4x64_pd, so this requires AVX2. */
void single_qubit_dense_matrix_gate_single_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
if (target_qubit_index == 0) {
/* target bit 0: one register holds a full (amp0, amp1) pair, so the
 * whole 2x2 matrix is folded into the per-register constants */
ITYPE basis = 0;
__m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
__m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
__m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
__m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
for (basis = 0; basis < dim; basis += 2) {
double* ptr = (double*)(state + basis);
__m256d data = _mm256_loadu_pd(ptr);
/* upper output row: dot product with (matrix[0], matrix[1]) */
__m256d data_u0 = _mm256_mul_pd(data, mv00);
__m256d data_u1 = _mm256_mul_pd(data, mv01);
__m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
/* lower output row: dot product with (matrix[2], matrix[3]) */
__m256d data_d0 = _mm256_mul_pd(data, mv20);
__m256d data_d1 = _mm256_mul_pd(data, mv21);
__m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
__m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
_mm256_storeu_pd(ptr, data_r);
}
}
else {
/* general target: process two adjacent pairs (basis_0, basis_0+1) vs
 * (basis_1, basis_1+1); each matrix entry is broadcast across a register */
ITYPE state_index = 0;
__m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
__m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
__m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
__m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
__m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
__m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
__m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
__m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
for (state_index = 0; state_index < loop_dim; state_index+=2) {
ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
ITYPE basis_1 = basis_0 + mask;
double* ptr0 = (double*)(state + basis_0);
double* ptr1 = (double*)(state + basis_1);
__m256d data0 = _mm256_loadu_pd(ptr0);
__m256d data1 = _mm256_loadu_pd(ptr1);
/* upper outputs: matrix[0]*data0 + matrix[1]*data1 */
__m256d data_u2 = _mm256_mul_pd(data0, mv00);
__m256d data_u3 = _mm256_mul_pd(data1, mv10);
__m256d data_u4 = _mm256_mul_pd(data0, mv01);
__m256d data_u5 = _mm256_mul_pd(data1, mv11);
__m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
__m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);
/* lower outputs: matrix[2]*data0 + matrix[3]*data1 */
__m256d data_d2 = _mm256_mul_pd(data0, mv20);
__m256d data_d3 = _mm256_mul_pd(data1, mv30);
__m256d data_d4 = _mm256_mul_pd(data0, mv21);
__m256d data_d5 = _mm256_mul_pd(data1, mv31);
__m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
__m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);
__m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
__m256d data_r1 = _mm256_add_pd(data_d6, data_d7);
_mm256_storeu_pd(ptr0, data_r0);
_mm256_storeu_pd(ptr1, data_r1);
}
}
}
#ifdef _OPENMP
/* OpenMP + AVX2 kernel: identical math to the serial SIMD version (see
 * single_qubit_dense_matrix_gate_single_simd for the mul/hadd complex
 * multiply layout); iterations touch disjoint amplitudes, so the loops
 * parallelize without synchronization. */
void single_qubit_dense_matrix_gate_parallel_simd(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
if (target_qubit_index == 0) {
/* target bit 0: one register holds a full (amp0, amp1) pair */
ITYPE basis = 0;
__m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0]));
__m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0]));
__m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2]));
__m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2]));
#pragma omp parallel for
for (basis = 0; basis < dim; basis += 2) {
double* ptr = (double*)(state + basis);
__m256d data = _mm256_loadu_pd(ptr);
__m256d data_u0 = _mm256_mul_pd(data, mv00);
__m256d data_u1 = _mm256_mul_pd(data, mv01);
__m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1);
data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
__m256d data_d0 = _mm256_mul_pd(data, mv20);
__m256d data_d1 = _mm256_mul_pd(data, mv21);
__m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1);
data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
__m256d data_r = _mm256_hadd_pd(data_u2, data_d2);
data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216
_mm256_storeu_pd(ptr, data_r);
}
}
else {
/* general target: two adjacent pairs per iteration, matrix entries
 * broadcast per register */
ITYPE state_index = 0;
__m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0]));
__m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0]));
__m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1]));
__m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1]));
__m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2]));
__m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2]));
__m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3]));
__m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3]));
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; state_index += 2) {
ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
ITYPE basis_1 = basis_0 + mask;
double* ptr0 = (double*)(state + basis_0);
double* ptr1 = (double*)(state + basis_1);
__m256d data0 = _mm256_loadu_pd(ptr0);
__m256d data1 = _mm256_loadu_pd(ptr1);
__m256d data_u2 = _mm256_mul_pd(data0, mv00);
__m256d data_u3 = _mm256_mul_pd(data1, mv10);
__m256d data_u4 = _mm256_mul_pd(data0, mv01);
__m256d data_u5 = _mm256_mul_pd(data1, mv11);
__m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4);
__m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5);
__m256d data_d2 = _mm256_mul_pd(data0, mv20);
__m256d data_d3 = _mm256_mul_pd(data1, mv30);
__m256d data_d4 = _mm256_mul_pd(data0, mv21);
__m256d data_d5 = _mm256_mul_pd(data1, mv31);
__m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4);
__m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5);
__m256d data_r0 = _mm256_add_pd(data_u6, data_u7);
__m256d data_r1 = _mm256_add_pd(data_d6, data_d7);
_mm256_storeu_pd(ptr0, data_r0);
_mm256_storeu_pd(ptr1, data_r1);
}
}
}
#endif
#endif
/*
void single_qubit_dense_matrix_gate_old_single(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
// target mask
const ITYPE target_mask = 1ULL << target_qubit_index;
// loop variables
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ target_mask;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
}
}
#ifdef _OPENMP
void single_qubit_dense_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
// target mask
const ITYPE target_mask = 1ULL << target_qubit_index;
// loop variables
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, target_mask, target_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ target_mask;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
}
}
#endif
*/
|
conv_kernel_mips.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "conv_kernel_mips.h"
#include "wino_conv_kernel_mips.h"
#if __mips_msa
#include <msa.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Bytes needed for the private interleave buffer: one full copy of the
 * filter tensor. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    const int total_bytes = filter->elem_num * filter->elem_size; // caution
    return total_bytes;
}
/* Fill the interleave buffer with a verbatim copy of the filter weights.
 * No reordering happens here; the pack4 reordering is done later by
 * conv_hcl_interleave_pack4 in conv_hcl_prerun. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
/* simply copy the data */
memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}
/* im2col for NCHW float data.
 *
 * Expands the (inc x inh x inw) image into a (ksize_h*ksize_w*inc) x
 * (outh*outw) column matrix so convolution becomes one GEMM. Taps that fall
 * outside the image (padding) are written as zeros.
 *
 * data_img : input image, channel-major (c, h, w)
 * data_col : output column buffer, one row per (channel, kh, kw) tap
 * sh/sw: strides, ph/pw: paddings, dh/dw: dilations; outc is unused but kept
 * for interface compatibility.
 *
 * Fixes vs previous version:
 *  - the input cursor used to start at (w_low - 1) * sw, which forms a
 *    pointer before the row when w_low == 0 (undefined behavior); it now
 *    starts at the first in-image tap and advances after each read.
 *  - w_high is clamped to >= w_low so a kernel tap entirely right of the
 *    image can no longer drive the tail memset past the row end. */
void im2col(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int outc, int ksize_h,
            int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;
    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose the row index into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        /* horizontal image coordinate of this tap at output column 0 */
        const int im_col = kw * dw - pw;
        /* output-column range [w_low, w_high) whose taps land inside the image */
        int w_low = -im_col / sw + (-im_col % sw > 0);
        if (w_low < 0)
            w_low = 0;
        int w_high = (inw - im_col) / sw + ((inw - im_col) % sw > 0);
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low)
            w_high = w_low;
        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;
            if (im_row >= 0 && im_row < inh)
            {
                /* first in-image tap of this output row */
                const float* in = data_img + inw * (im_row + inh * c_) + im_col + w_low * sw;
                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    *(out++) = *in;
                    in += sw;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* whole row is vertical padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}
/* Slice batch image n / group `group` out of the input tensor and im2col it
 * into the private buffer.
 * Fixes: arithmetic on void* is a GNU extension, not ISO C -- offset through
 * char* instead; removed the unused local `input_zero` (it was computed from
 * input->zero_point but never used). */
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    /* byte offset of this (batch, group) slice */
    void* input_base = (char*)input->data + (size_t)(n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    im2col(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
           output->dims[1] / param->group, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w,
           param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}
/* Re-pack the KxN im2col matrix pB (row-major) into pB_t so that each group
 * of four consecutive columns becomes a contiguous 4-wide panel; leftover
 * columns are appended one at a time after the panels. Layout matches what
 * sgemm() consumes. */
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
    const int num_panels = N >> 2;        /* full 4-column panels */
    const int tail_start = num_panels << 2;
    /* panels: [c0r0 c1r0 c2r0 c3r0 | c0r1 c1r1 ...] */
#pragma omp parallel for num_threads(num_thread)
    for (int panel = 0; panel < num_panels; panel++)
    {
        const int col = panel * 4;
        const float* src = pB + col;
        float* dst = pB_t + (col / 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
#if __mips_msa
            __msa_st_w(__msa_ld_w(src, 0), dst, 0);
#else
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            dst[3] = src[3];
#endif // __mips_msa
            dst += 4;
            src += N;
        }
    }
    /* leftover columns: one column per K-sized slot */
#pragma omp parallel for num_threads(num_thread)
    for (int col = tail_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 4 + col % 4) * 4 * K;
        for (int row = 0; row < K; row++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
// unloop output M, unloop N, packet 4x4, using intrinsic
/* Packed SGEMM: C(MxN) = A(MxK) * B(KxN), where A was re-packed by
 * conv_hcl_interleave_pack4 and B by input_pack4 (4-wide panels, leftovers
 * appended). Output rows are processed in blocks of 4 (distributed over
 * OpenMP threads), columns in blocks of 4; the MSA path vectorizes 4 floats
 * per op, the #else path is the scalar equivalent. */
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = M >> 2;
remain_outch_start = nn_outch << 2;
// output ch0 - ch3
#pragma omp parallel for num_threads(num_thread)
for (int pp=0; pp<nn_outch; pp++)
{
int i = pp * 4;
float* output0 = pC + ( i )*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
int j = 0;
/* 4 output rows x 4 output columns per iteration */
for (; j + 3 < N; j += 4)
{
float* va = pA_t + (i / 4) * 4 * K;
float* vb = pB_t + (j / 4) * 4 * K;
#if __mips_msa
v4f32 _sum0 = {0.f};
v4f32 _sum1 = {0.f};
v4f32 _sum2 = {0.f};
v4f32 _sum3 = {0.f};
for (int k = 0; k < K; k++)
{
// k0
v4f32 _vb = (v4f32)__msa_ld_w(vb, 0);
v4f32 _va0 = {va[0], va[0], va[0], va[0]};
v4f32 _va1 = {va[1], va[1], va[1], va[1]};
v4f32 _va2 = {va[2], va[2], va[2], va[2]};
v4f32 _va3 = {va[3], va[3], va[3], va[3]};
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb, _va0)); // sum0 = (a00-a03) * k00
_sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_vb, _va1)); // sum1 = (a00-a03) * k10
_sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_vb, _va2)); // sum2 = (a00-a03) * k20
_sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_vb, _va3)); // sum3 = (a00-a03) * k30
va += 4;
vb += 4;
}
__msa_st_w((v4i32)_sum0, output0, 0);
__msa_st_w((v4i32)_sum1, output1, 0);
__msa_st_w((v4i32)_sum2, output2, 0);
__msa_st_w((v4i32)_sum3, output3, 0);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __mips_msa
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
/* 4 output rows x 1 leftover column */
for (; j < N; j++)
{
float* va = pA_t + (i / 4) * 4 * K;
float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
#if __mips_msa
v4f32 _sum0_3 = {0.f};
v4f32 _sum0 = {0.f};
v4f32 _sum1 = {0.f};
v4f32 _sum2 = {0.f};
v4f32 _sum3 = {0.f};
int k = 0;
/* here each lane of a _sum vector is one output row; K is unrolled by 4 */
for (; k + 3 < K; k = k + 4)
{
v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]};
v4f32 _vb1 = {vb[1], vb[1], vb[1], vb[1]};
v4f32 _vb2 = {vb[2], vb[2], vb[2], vb[2]};
v4f32 _vb3 = {vb[3], vb[3], vb[3], vb[3]};
v4f32 _va0 = (v4f32)__msa_ld_w(va, 0);
v4f32 _va1 = (v4f32)__msa_ld_w(va + 4, 0);
v4f32 _va2 = (v4f32)__msa_ld_w(va + 8, 0);
v4f32 _va3 = (v4f32)__msa_ld_w(va + 12, 0);
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_va0, _vb0)); // sum0 += (k00-k30) * a00
_sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_va1, _vb1)); // sum1 += (k01-k31) * a10
_sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_va2, _vb2)); // sum2 += (k02-k32) * a20
_sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_va3, _vb3)); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = __msa_fadd_w(_sum0, _sum1);
_sum2 = __msa_fadd_w(_sum2, _sum3);
_sum0_3 = __msa_fadd_w(_sum2, _sum0);
// _sum0_3 = __msa_fadd_w(_sum0_3, _sum2);
for (; k < K; k++)
{
v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]};
v4f32 _va = (v4f32)__msa_ld_w(va, 0);
_sum0_3 = __msa_fadd_w(_sum0_3, __msa_fmul_w(_va, _vb0)); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
output0[0] = _sum0_3[0];
output1[0] = _sum0_3[1];
output2[0] = _sum0_3[2];
output3[0] = _sum0_3[3];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __mips_msa
output0++;
output1++;
output2++;
output3++;
}
}
// output ch0
/* leftover output rows, processed one at a time */
#pragma omp parallel for num_threads(num_thread)
for (int i=remain_outch_start; i<M; i++)
{
float* output = pC + i * N;
int j = 0;
/* 1 output row x 4 output columns */
for (; j + 3 < N; j += 4)
{
float* va = pA_t + (i / 4 + i % 4) * 4 * K;
float* vb = pB_t + (j / 4) * 4 * K;
#if __mips_msa
v4f32 _sum0 = {0.f};
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
v4f32 _va0 = {va[0], va[0], va[0], va[0]};
v4f32 _va1 = {va[1], va[1], va[1], va[1]};
v4f32 _va2 = {va[2], va[2], va[2], va[2]};
v4f32 _va3 = {va[3], va[3], va[3], va[3]};
v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);
v4f32 _vb1 = (v4f32)__msa_ld_w(vb + 4, 0);
v4f32 _vb2 = (v4f32)__msa_ld_w(vb + 8, 0);
v4f32 _vb3 = (v4f32)__msa_ld_w(vb + 12, 0);
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb1, _va1)); // sum0 += (a10-a13) * k01
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb2, _va2)); // sum0 += (a20-a23) * k02
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb3, _va3)); // sum0 += (a30-a33) * k03
va += 4;
vb += 16;
}
for (; k < K; k++)
{
// k0
v4f32 _va0 = {va[0]};
v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0);
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00
va += 1;
vb += 4;
}
__msa_st_w((v4i32)_sum0, output, 0);
#else
float sum[4] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = sum[n];
}
#endif // __mips_msa
output += 4;
}
/* 1 output row x 1 leftover column: plain dot product */
for (; j < N; j++)
{
float* va = pA_t + (i / 4 + i % 4) * 4 * K;
float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
int k = 0;
#if __mips_msa
v4f32 _sum0 = {0.f};
for (; k + 3 < K; k += 4)
{
v4f32 _p0 = (v4f32)__msa_ld_w(vb, 0);
v4f32 _k0 = (v4f32)__msa_ld_w(va, 0);
_sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_p0, _k0));
va += 4;
vb += 4;
}
float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
float sum0 = 0.f;
#endif // __mips_msa
for (; k < K; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
/* fp32 convolution for one batch image and one group: packed GEMM followed
 * by per-channel bias and an activation pass.
 * param->activation == 0 selects ReLU, > 0 selects ReLU6 (clamp to [0, 6]). */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    const int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    const int outchan_g = param->output_channel / param->group;
    const int out_hw = output->dims[2] * output->dims[3];
    const int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* per-group slices of the packed filter, packed input and the output */
    float* filter_sgemm = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* input_sgemm_pack4 = ( float* )priv_info->im2col_buffer_pack4;
    float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_hw;

    sgemm(outchan_g, out_hw, kernel_size, filter_sgemm, input_sgemm_pack4, output_fp32, num_thread);

    /* add per-channel bias */
    if (bias)
    {
        const float* bias_fp32 = ( float* )bias->data + outchan_g * group;
        for (int c = 0; c < outchan_g; c++)
        {
            float* row = output_fp32 + c * out_hw;
            for (int p = 0; p < out_hw; p++)
                row[p] += bias_fp32[c];
        }
    }
    /* ReLU */
    if (param->activation == 0)
    {
        const int total = outchan_g * out_hw;
        for (int off = 0; off < total; off++)
        {
            if (output_fp32[off] < 0)
                output_fp32[off] = 0;
        }
    }
    /* ReLU6 */
    if (param->activation > 0)
    {
        const int total = outchan_g * out_hw;
        for (int off = 0; off < total; off++)
        {
            if (output_fp32[off] < 0)
                output_fp32[off] = 0;
            if (output_fp32[off] > 6)
                output_fp32[off] = 6;
        }
    }
}
/* check the conv wheather need to be using winograd */
/* Decide whether the winograd path applies: only non-grouped 3x3 stride-1
 * dilation-1 convolutions with at least 16 in/out channels on inputs larger
 * than 10x10. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    if (in_h <= 10 && in_w <= 10)
        return 0;
    if (param->group != 1)
        return 0;
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;
    if (param->stride_h != 1 || param->stride_w != 1)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;
    if (param->input_channel < 16 || param->output_channel < 16)
        return 0;
    return 1;
}
/* Bytes needed for the im2col buffer: (per-group kernel footprint) x
 * (output pixels) x element size. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    const int chan_per_group = param->input_channel / param->group;
    const int kernel_size = chan_per_group * param->kernel_h * param->kernel_w;
    const int output_xy = output->dims[2] * output->dims[3];
    return input->elem_size * output_xy * kernel_size;
}
/* Bytes needed for the pack4 im2col buffer: full 4-column panels plus one
 * K-sized slot per leftover column (matches input_pack4's layout). */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    const int K = filter->elem_num / filter->dims[0];
    const int N = output->dims[2] * output->dims[3];
    const int slots = N / 4 + N % 4;
    return 4 * K * slots * filter->elem_size;
}
/* Bytes needed for the pack4 filter buffer: full 4-row panels plus one
 * K-sized slot per leftover row (matches conv_hcl_interleave_pack4). */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    const int slots = M / 4 + M % 4;
    return 4 * K * slots * filter->elem_size;
}
/* Re-pack the MxK interleave buffer into 4-row panels (the filter-side
 * counterpart of input_pack4): within a panel, row elements at the same
 * column index are stored contiguously; leftover rows are copied verbatim
 * into their own slots. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    const float* pA = ( const float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;
    const int num_panels = M >> 2;
    const int tail_start = num_panels << 2;
    for (int panel = 0; panel < num_panels; panel++)
    {
        const int p = panel * 4;
        const float* row0 = pA + (p + 0) * K;
        const float* row1 = pA + (p + 1) * K;
        const float* row2 = pA + (p + 2) * K;
        const float* row3 = pA + (p + 3) * K;
        float* dst = pA_t + (p / 4) * 4 * K;
        for (int q = 0; q < K; q++)
        {
            *dst++ = row0[q];
            *dst++ = row1[q];
            *dst++ = row2[q];
            *dst++ = row3[q];
        }
    }
    for (int p = tail_start; p < M; p++)
    {
        const float* row = pA + p * K;
        float* dst = pA_t + (p / 4 + p % 4) * 4 * K;
        for (int q = 0; q < K; q++)
            *dst++ = row[q];
    }
}
/* One-time setup: choose winograd vs im2col+GEMM, allocate whichever scratch
 * buffers the caller did not supply externally, and pre-pack the filter. */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
struct conv_priv_info* priv_info, struct conv_param* param)
{
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
/* check winograd implement, only for conv3x3s1 */
priv_info->winograd = winograd_support(param, in_h, in_w);
if (priv_info->winograd)
{
return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
}
/* im2col scratch buffer (unless supplied via conv_hcl_set_shared_mem) */
if (!priv_info->external_im2col_mem)
{
int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer = mem;
priv_info->im2col_buffer_size = mem_size;
}
/* pack4 im2col buffer (unless supplied via conv_hcl_set_shared_pack4_mem) */
if (!priv_info->external_im2col_pack4_mem)
{
int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->im2col_buffer_pack4 = mem;
priv_info->im2col_buffer_pack4_size = mem_size;
}
/* flat copy of the filter weights */
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
interleave(filter_tensor, priv_info);
/* NOTE(review): unlike the blocks above, this allocates when the external
 * flag IS set; since sgemm_fp32 always reads interleave_buffer_pack4,
 * confirm callers set external_interleave_pack4_mem before prerun, or
 * whether the condition polarity is inverted. */
if (priv_info->external_interleave_pack4_mem)
{
int M = filter_tensor->dims[0];
int K = filter_tensor->elem_num / filter_tensor->dims[0];
int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer_pack4 = mem;
priv_info->interleave_buffer_pack4_size = mem_size;
conv_hcl_interleave_pack4(M, K, priv_info);
/* the flat copy is no longer needed once the pack4 copy exists */
if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
{
sys_free(priv_info->interleave_buffer);
priv_info->interleave_buffer = NULL;
}
}
return 0;
}
/* Release all internally allocated scratch buffers; externally supplied
 * buffers are left alone. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
if (priv_info->winograd)
{
return wino_conv_hcl_postrun(priv_info);
}
/* NOTE(review): this branch tests interleave_buffer but frees
 * interleave_buffer_pack4; the pack4 buffer is also freed by the last
 * branch below when still non-NULL. The check/free pair looks mismatched
 * -- confirm the intended condition before changing it. */
if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem &&
priv_info->interleave_buffer != NULL)
{
sys_free(priv_info->interleave_buffer_pack4);
priv_info->interleave_buffer_pack4 = NULL;
}
if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
{
sys_free(priv_info->im2col_buffer);
priv_info->im2col_buffer = NULL;
}
if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
{
sys_free(priv_info->im2col_buffer_pack4);
priv_info->im2col_buffer_pack4 = NULL;
}
/* safe even if the first branch ran: the pointer was NULLed there */
if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
{
sys_free(priv_info->interleave_buffer_pack4);
priv_info->interleave_buffer_pack4 = NULL;
}
return 0;
}
/* Forward convolution: for each batch image and each group, run
 * im2col -> pack4 -> sgemm (+bias/activation). Delegates to the winograd
 * implementation when prerun selected it. */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }
    const int group = param->group;
    const int type = input_tensor->data_type;
    const int batch = input_tensor->dims[0];
    /* GEMM dimensions are invariant over batch/group */
    const int K = filter_tensor->elem_num / filter_tensor->dims[0];
    const int N = output_tensor->dims[2] * output_tensor->dims[3];
    for (int i = 0; i < batch; i++)
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);
            input_pack4(K, N, ( float* )priv_info->im2col_buffer, ( float* )priv_info->im2col_buffer_pack4, num_thread);
            if (type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
        }
    }
    return 0;
}
/* Adopt a caller-provided im2col buffer; prerun will skip allocating one and
 * postrun will not free it. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/*
 * Attach a caller-owned scratch buffer to be used as the packed (pack4)
 * im2col workspace.  The external flag is set so the release path will not
 * free this memory.  Always succeeds and returns 0.
 */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;
    return 0;
}
|
simplehash.c | /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced
* under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National
* Laboratory (LANL), which is operated by Los Alamos National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
* ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* Under this license, it is required to include a reference to this work. We
* request that each derivative work contain a reference to LANL Copyright
* Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly
* measured. In addition, it is requested that a modifier is included as in
* the following example:
*
* //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003
*
* This is LANL Copyright Disclosure C14043/LA-CC-14-003
*/
#include <stdio.h>
#include <stdlib.h>
#include "simplehash.h"
#ifdef HAVE_OPENCL
#include "simplehashlib_kern.inc"
#include "simplehash_kern.inc"
#endif
/* Multiplicative hash factors: home slot = (key*AA + BB) % prime % hashtablesize. */
static ulong AA;
static ulong BB;
/* Large 32-bit prime modulus for the multiplicative hash. */
static ulong prime=4294967291;
/* Number of slots in the most recently built table. */
static uint hashtablesize;
/* Mesh row stride; debug prints recover ii = key % stride, jj = key / stride. */
static uint hash_stride;
static uint hash_ncells;
static uint write_hash_collisions;
static uint read_hash_collisions;
/* Running statistics accumulated across runs (at higher report levels). */
static double write_hash_collisions_runsum = 0.0;
static double read_hash_collisions_runsum = 0.0;
static uint write_hash_collisions_count = 0;
static uint read_hash_collisions_count = 0;
/* 0 = no instrumentation; 1/2 add collision counting; 3 traces every probe. */
static uint hash_report_level = 0;
static uint hash_queries;
/* Open-addressing probe strategy (LINEAR, QUADRATIC, PRIME_JUMP, ...). */
static uint hash_method = QUADRATIC;
/* Modulus used to derive the per-key stride for prime-jump probing. */
static uint hash_jump_prime = 41;
/* Compact table oversizing factor relative to ncells. */
static double hash_mult = 3.0;
static int do_compact_hash = 0;
float mem_opt_factor;
/* Dispatch pointers bound by the init routines to the variant matching the
 * selected method and report level. */
int (*read_hash)(ulong, int *);
void (*write_hash)(uint, ulong, int *);
#ifdef _OPENMP
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void (*write_hash_openmp)(uint, ulong, int *);
#else
/* Without CAS builtins the OpenMP writer needs a per-slot lock array. */
void (*write_hash_openmp)(uint, ulong, int *, omp_lock_t * lock);
#endif
#endif
/* Report which probing strategy is currently selected. */
int get_hash_method(void) {
    return hash_method;
}
/* Report the slot count of the most recently built hash table. */
long long get_hashtablesize(void) {
    return hashtablesize;
}
/* Allocate and initialize a hash table for ncells entries over an isize x
 * jsize mesh, and bind read_hash/write_hash to the variant matching the
 * probe method and report level.  A "perfect" table has one int slot per
 * mesh cell; a "compact" table stores interleaved (key, value) int pairs
 * and resolves collisions by open addressing.  Returns the malloc'ed table
 * (caller frees); key slots are set to -1 (empty).
 * Fix: the summary printf said "by percentage" but printed a raw fraction;
 * it now multiplies by 100.0, matching compact_hash_init_openmp. */
int *compact_hash_init(int ncells, uint isize, uint jsize, int do_init, uint report_level){
    hash_ncells = 0;
    write_hash_collisions = 0;
    read_hash_collisions = 0;
    hash_queries = 0;
    hash_report_level = report_level;
    hash_stride = isize;
    int *hash = NULL;

    uint compact_hash_size = (uint)((double)ncells*hash_mult);
    /* NOTE(review): this override discards the hash_mult sizing above and makes
     * the compact table exactly ncells slots (load factor 1.0).  Looks like a
     * leftover debug override -- confirm before removing. */
    compact_hash_size = ncells;
    uint perfect_hash_size = (uint)(isize*jsize);

    /* Heuristic: prefer the perfect hash unless it would cost hash_mem_factor
     * times more memory than the compact table. */
    float hash_mem_factor = 20.0;
    float hash_mem_ratio = (double)perfect_hash_size/(double)compact_hash_size;
    if (mem_opt_factor != 1.0) hash_mem_factor /= (mem_opt_factor*0.2);
    do_compact_hash = (hash_mem_ratio < hash_mem_factor) ? 0 : 1;
    /* NOTE(review): forces the compact path regardless of the heuristic above;
     * presumably a debug override -- confirm. */
    do_compact_hash = 1;
    if (hash_report_level >= 2) printf("DEBUG do_compact_hash %d hash_mem_ratio %f hash_mem_factor %f mem_opt_factor %f perfect_hash_size %u compact_hash_size %u\n",do_compact_hash,hash_mem_ratio,hash_mem_factor,mem_opt_factor,perfect_hash_size,compact_hash_size);

    if (do_compact_hash) {
        hashtablesize = compact_hash_size;
        /* Random multiplicative-hash factors in [1, prime-1] / [0, prime-1]. */
        AA = (ulong)(1.0+(double)(prime-1)*drand48());
        BB = (ulong)(0.0+(double)(prime-1)*drand48());
        if (AA > prime-1 || BB > prime-1) exit(0);
        if (hash_report_level > 1) printf("Factors AA %lu BB %lu\n",AA,BB);
        /* Interleaved (key, value) pairs; only key slots (even indices) need
         * the empty marker.  Note: do_init is only honored for the perfect
         * table -- compact key slots must always be cleared. */
        hash = (int *)malloc(2*hashtablesize*sizeof(int));
        for (uint ii = 0; ii<2*hashtablesize; ii+=2){
            hash[ii] = -1;
        }
        if (hash_method == LINEAR){
            if (hash_report_level == 0){
                read_hash  = read_hash_linear;
                write_hash = write_hash_linear;
            } else if (hash_report_level == 1){
                read_hash  = read_hash_linear_report_level_1;
                write_hash = write_hash_linear_report_level_1;
            } else if (hash_report_level == 2){
                read_hash  = read_hash_linear_report_level_2;
                write_hash = write_hash_linear_report_level_2;
            } else if (hash_report_level == 3){
                read_hash  = read_hash_linear_report_level_3;
                write_hash = write_hash_linear_report_level_3;
            }
        } else if (hash_method == QUADRATIC) {
            if (hash_report_level == 0){
                read_hash  = read_hash_quadratic;
                write_hash = write_hash_quadratic;
            } else if (hash_report_level == 1){
                read_hash  = read_hash_quadratic_report_level_1;
                write_hash = write_hash_quadratic_report_level_1;
            } else if (hash_report_level == 2){
                read_hash  = read_hash_quadratic_report_level_2;
                write_hash = write_hash_quadratic_report_level_2;
            } else if (hash_report_level == 3){
                read_hash  = read_hash_quadratic_report_level_3;
                write_hash = write_hash_quadratic_report_level_3;
            }
        } else if (hash_method == PRIME_JUMP) {
            if (hash_report_level == 0){
                read_hash  = read_hash_primejump;
                write_hash = write_hash_primejump;
            } else if (hash_report_level == 1){
                read_hash  = read_hash_primejump_report_level_1;
                write_hash = write_hash_primejump_report_level_1;
            } else if (hash_report_level == 2){
                read_hash  = read_hash_primejump_report_level_2;
                write_hash = write_hash_primejump_report_level_2;
            } else if (hash_report_level == 3){
                read_hash  = read_hash_primejump_report_level_3;
                write_hash = write_hash_primejump_report_level_3;
            }
        }
    } else {
        /* Perfect hash: the key is the slot index; no collisions possible. */
        hashtablesize = perfect_hash_size;
        hash = (int *)malloc(hashtablesize*sizeof(int));
        if (do_init) {
            for (uint ii = 0; ii<hashtablesize; ii++){
                hash[ii] = -1;
            }
        }
        read_hash  = read_hash_perfect;
        write_hash = write_hash_perfect;
    }

    if (hash_report_level >= 2) {
        /* Report the size ratio as a percentage, as the message text states
         * (consistent with compact_hash_init_openmp). */
        printf("Hash table size %u perfect hash table size %u memory savings %u by percentage %lf\n",
               hashtablesize,isize*jsize,isize*jsize-hashtablesize,
               (double)hashtablesize/(double)(isize*jsize) * 100.0);
    }
    return(hash);
}
#ifdef _OPENMP
/* OpenMP-aware variant of compact_hash_init: same table selection and
 * allocation, but binds write_hash_openmp to thread-safe writers and, when
 * CAS builtins are unavailable, allocates and initializes one omp lock per
 * slot (returned through *lock).  Returns the table; caller frees.
 * Fix: the "#pragma omp parallel for" in the perfect-hash branch preceded an
 * `if` statement instead of a for loop, which is invalid for the OpenMP
 * worksharing-loop construct; the pragma now sits directly before the loop. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int *compact_hash_init_openmp(int ncells, uint isize, uint jsize, int do_init, uint report_level){
#else
int *compact_hash_init_openmp(int ncells, uint isize, uint jsize, int do_init, uint report_level, omp_lock_t **lock){
#endif
    hash_ncells = 0;
    write_hash_collisions = 0;
    read_hash_collisions = 0;
    hash_queries = 0;
    hash_report_level = report_level;
    hash_stride = isize;
    int *hash = NULL;

    /* An explicit user choice overrides the automatic method selection. */
    if (choose_hash_method != METHOD_UNSET) hash_method = choose_hash_method;
    uint compact_hash_size = (uint)((double)ncells*hash_mult);
    uint perfect_hash_size = (uint)(isize*jsize);
    if (hash_method == METHOD_UNSET){
        /* Prefer the perfect hash unless it costs hash_mem_factor times more
         * memory than the compact table. */
        float hash_mem_factor = 20.0;
        float hash_mem_ratio = (double)perfect_hash_size/(double)compact_hash_size;
        if (mem_opt_factor != 1.0) hash_mem_factor /= (mem_opt_factor*0.2);
        hash_method = (hash_mem_ratio < hash_mem_factor) ? PERFECT_HASH : QUADRATIC;
        //hash_method = QUADRATIC;
        if (hash_report_level >= 2) printf("DEBUG hash_method %d hash_mem_ratio %f hash_mem_factor %f mem_opt_factor %f perfect_hash_size %u compact_hash_size %u\n",
            hash_method,hash_mem_ratio,hash_mem_factor,mem_opt_factor,perfect_hash_size,compact_hash_size);
    }
    int do_compact_hash = (hash_method == PERFECT_HASH) ? 0 : 1;
    if (hash_report_level >= 2) printf("DEBUG do_compact_hash %d hash_method %d perfect_hash_size %u compact_hash_size %u\n",
        do_compact_hash,hash_method,perfect_hash_size,compact_hash_size);

    if (do_compact_hash) {
        hashtablesize = compact_hash_size;
        //srand48(0);
        /* Random multiplicative-hash factors in [1, prime-1] / [0, prime-1]. */
        AA = (ulong)(1.0+(double)(prime-1)*drand48());
        BB = (ulong)(0.0+(double)(prime-1)*drand48());
        if (AA > prime-1 || BB > prime-1) exit(0);
        if (hash_report_level > 1) printf("Factors AA %lu BB %lu\n",AA,BB);
        /* Interleaved (key, value) pairs; clear key slots in parallel and
         * initialize the per-slot locks when CAS is unavailable. */
        hash = (int *)genvector(2*hashtablesize,sizeof(int));
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
        (*lock) = (omp_lock_t *)malloc(hashtablesize*sizeof(omp_lock_t));
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (uint ii = 0; ii<hashtablesize; ii++){
            hash[2*ii] = -1;
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
            omp_init_lock(&((*lock)[ii]));
#endif
        }
        if (hash_method == LINEAR){
            if (hash_report_level == 0){
                read_hash         = read_hash_linear;
                write_hash_openmp = write_hash_linear_openmp;
            } else if (hash_report_level == 1){
                read_hash         = read_hash_linear_report_level_1;
                write_hash_openmp = write_hash_linear_openmp_report_level_1;
            } else if (hash_report_level == 2){
                read_hash         = read_hash_linear_report_level_2;
                write_hash_openmp = write_hash_linear_openmp_report_level_2;
            } else if (hash_report_level == 3){
                read_hash         = read_hash_linear_report_level_3;
                write_hash_openmp = write_hash_linear_openmp_report_level_3;
            }
        } else if (hash_method == QUADRATIC) {
            if (hash_report_level == 0){
                read_hash         = read_hash_quadratic;
                write_hash_openmp = write_hash_quadratic_openmp;
            } else if (hash_report_level == 1){
                read_hash         = read_hash_quadratic_report_level_1;
                write_hash_openmp = write_hash_quadratic_openmp_report_level_1;
            } else if (hash_report_level == 2){
                read_hash         = read_hash_quadratic_report_level_2;
                write_hash_openmp = write_hash_quadratic_openmp_report_level_2;
            } else if (hash_report_level == 3){
                read_hash         = read_hash_quadratic_report_level_3;
                write_hash_openmp = write_hash_quadratic_openmp_report_level_3;
            }
        } else if (hash_method == PRIME_JUMP) {
            if (hash_report_level == 0){
                read_hash         = read_hash_primejump;
                write_hash_openmp = write_hash_primejump_openmp;
            } else if (hash_report_level == 1){
                read_hash         = read_hash_primejump_report_level_1;
                write_hash_openmp = write_hash_primejump_openmp_report_level_1;
            } else if (hash_report_level == 2){
                read_hash         = read_hash_primejump_report_level_2;
                write_hash_openmp = write_hash_primejump_openmp_report_level_2;
            } else if (hash_report_level == 3){
                read_hash         = read_hash_primejump_report_level_3;
                write_hash_openmp = write_hash_primejump_openmp_report_level_3;
            }
        }
    } else {
        /* Perfect hash: the key is the slot index; no locks needed. */
        hashtablesize = perfect_hash_size;
        hash = (int *)genvector(hashtablesize,sizeof(int));
        if (do_init) {
            /* Fix: the pragma must immediately precede the for loop, not the
             * enclosing if statement. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for (uint ii = 0; ii<hashtablesize; ii++){
                hash[ii] = -1;
            }
        }
        read_hash         = read_hash_perfect;
        write_hash_openmp = write_hash_perfect_openmp;
    }

    if (hash_report_level >= 2) {
        printf("Hash table size %u perfect hash table size %u memory savings %d by percentage %lf\n",
               hashtablesize,isize*jsize,(int)isize*(int)jsize-(int)hashtablesize,
               (double)hashtablesize/(double)(isize*jsize) * 100.0);
    }
    return(hash);
}
#endif
/* Perfect hashing: the key itself is the slot index, so the cell id is
 * stored directly with no probing. */
void write_hash_perfect(uint ic, ulong hashkey, int *hash){
    hash[hashkey] = (int)ic;
}
/* Linear-probe insert: walk from the home slot one step at a time (with
 * wraparound) until an empty slot (-1) or a slot already holding this key is
 * found, then store the (key, value) pair there. */
void write_hash_linear(uint ic, ulong hashkey, int *hash){
    uint slot = (hashkey*AA+BB)%prime%hashtablesize;
    while (hash[2*slot] != -1 && hash[2*slot] != (int)hashkey) {
        slot = (slot + 1) % hashtablesize;
    }
    hash[2*slot] = hashkey;
    hash[2*slot+1] = ic;
}
/* Linear-probe insert, report level 1: counts each probe step in
 * write_hash_collisions and each insert in hash_ncells. */
void write_hash_linear_report_level_1(uint ic, ulong hashkey, int *hash){
uint hashloc;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){
write_hash_collisions++;
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Linear-probe insert, report level 2.
 * NOTE(review): body is currently identical to report level 1. */
void write_hash_linear_report_level_2(uint ic, ulong hashkey, int *hash){
uint hashloc;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){
write_hash_collisions++;
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Linear-probe insert, report level 3: traces every probe to stdout
 * (printing the NEXT candidate slot, hashloc+1) and accumulates the probe
 * count into write_hash_collisions. */
void write_hash_linear_report_level_3(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
hash_ncells++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc++,hashloc = hashloc%hashtablesize){
/* Preview the slot the loop increment is about to move to. */
int hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
icount++;
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Quadratic-probe insert: from the home slot, advance by 1, 4, 9, ...
 * (step^2, with wraparound) until an empty slot (-1) or a slot already
 * holding this key is found, then store the (key, value) pair. */
void write_hash_quadratic(uint ic, ulong hashkey, int *hash){
    int step = 0;
    uint slot = (hashkey*AA+BB)%prime%hashtablesize;
    while (hash[2*slot] != -1 && hash[2*slot] != (int)hashkey) {
        step++;
        slot = (slot + (uint)(step*step)) % hashtablesize;
    }
    hash[2*slot] = hashkey;
    hash[2*slot+1] = ic;
}
/* Quadratic-probe insert, report level 1: counts probes into
 * write_hash_collisions and inserts into hash_ncells. */
void write_hash_quadratic_report_level_1(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Quadratic-probe insert, report level 2.
 * NOTE(review): body is currently identical to report level 1. */
void write_hash_quadratic_report_level_2(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Quadratic-probe insert, report level 3: traces every probe to stdout
 * (printing the NEXT candidate slot) and accumulates the probe count into
 * write_hash_collisions. */
void write_hash_quadratic_report_level_3(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
hash_ncells++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
/* Preview the slot the loop increment is about to move to. */
int hashloctmp = hashloc+icount*icount;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Prime-jump insert: the probe stride is key-dependent
 * (jump = 1 + key % hash_jump_prime); from the home slot advance by
 * jump, 2*jump, 3*jump, ... (with wraparound) until an empty slot (-1) or a
 * slot already holding this key is found, then store the pair. */
void write_hash_primejump(uint ic, ulong hashkey, int *hash){
    uint jump = 1+hashkey%hash_jump_prime;
    int step = 0;
    uint slot = (hashkey*AA+BB)%prime%hashtablesize;
    while (hash[2*slot] != -1 && hash[2*slot] != (int)hashkey) {
        step++;
        slot = (slot + (uint)step*jump) % hashtablesize;
    }
    hash[2*slot] = hashkey;
    hash[2*slot+1] = ic;
}
/* Prime-jump insert, report level 1: counts probes into
 * write_hash_collisions and inserts into hash_ncells. */
void write_hash_primejump_report_level_1(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
uint jump = 1+hashkey%hash_jump_prime;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Prime-jump insert, report level 2.
 * NOTE(review): body is currently identical to report level 1. */
void write_hash_primejump_report_level_2(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
uint jump = 1+hashkey%hash_jump_prime;
hash_ncells++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
/* Prime-jump insert, report level 3: traces every probe to stdout and
 * accumulates the probe count into write_hash_collisions. */
void write_hash_primejump_report_level_3(uint ic, ulong hashkey, int *hash){
int icount = 0;
uint hashloc;
uint jump = 1+hashkey%hash_jump_prime;
hash_ncells++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
/* NOTE(review): the trace previews hashloc+1, but the loop actually
 * advances by icount*jump -- the printed slot does not match the next
 * probe; confirm whether this was meant to mirror the linear tracer. */
int hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
}
write_hash_collisions += icount;
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
}
#ifdef _OPENMP
/* OpenMP perfect-hash insert: each key maps to a unique slot, so a plain
 * store is already race-free (the lock parameter, when present, is unused). */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_perfect_openmp(uint ic, ulong hashkey, int *hash){
#else
void write_hash_perfect_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
hash[hashkey] = ic;
}
/* Thread-safe linear-probe insert.  With CAS builtins, each slot's key is
 * claimed atomically and the search gives up after MaxTries probes (in that
 * case the value slot is never written).  Without CAS, a per-slot omp lock
 * is held while a slot is examined/written. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_linear_openmp(uint ic, ulong hashkey, int *hash){
#else
void write_hash_linear_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
/* Atomically claim the slot if empty; old_key tells us what was there. */
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc++;
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
/* Hand-over-hand locking: release the current slot's lock before moving on. */
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
hashloc++;
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
}
/* Thread-safe linear-probe insert, report level 1: as write_hash_linear_openmp
 * but atomically accumulates the probe count into write_hash_collisions and
 * the insert count into hash_ncells.
 * Fix: in the CAS branch icount was incremented both by the for statement and
 * again inside the loop body, double-counting collisions and halving the
 * effective MaxTries; the redundant body increment is removed to match
 * write_hash_linear_openmp and the quadratic/primejump report variants.
 * NOTE(review): in the CAS branch icount starts at 1 (a collision-free insert
 * still adds 1) while the lock branch adds 0 -- consistent with the other
 * *_openmp_report variants; confirm intended. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_linear_openmp_report_level_1(uint ic, ulong hashkey, int *hash){
#else
void write_hash_linear_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc++;
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
/* Only write the value if we actually claimed a slot within MaxTries. */
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
hashloc++;
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
icount++;
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe linear-probe insert, report level 2 (currently the same
 * instrumentation as level 1).
 * Fix: removed the redundant second icount++ inside the CAS loop body, which
 * double-counted collisions and halved the effective MaxTries (now consistent
 * with write_hash_linear_openmp and the quadratic/primejump variants). */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_linear_openmp_report_level_2(uint ic, ulong hashkey, int *hash){
#else
void write_hash_linear_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc++;
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
/* Only write the value if we actually claimed a slot within MaxTries. */
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
hashloc++;
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
icount++;
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe linear-probe insert, report level 3: traces every probe to
 * stdout in addition to the level-1/2 collision accounting.
 * Fix: removed the redundant second icount++ inside the CAS loop body, which
 * double-counted collisions, halved the effective MaxTries, and skewed the
 * trace's step numbering (now consistent with write_hash_linear_openmp). */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_linear_openmp_report_level_3(uint ic, ulong hashkey, int *hash){
#else
void write_hash_linear_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc++;
hashloc %= hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
/* Only write the value if we actually claimed a slot within MaxTries. */
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
hashloc++;
hashloc = hashloc%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
omp_set_lock(&(lock[hashloc]));
icount++;
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe quadratic-probe insert (steps of icount^2).  CAS branch claims
 * key slots atomically and gives up after MaxTries probes (value slot then
 * unwritten); lock branch uses hand-over-hand per-slot omp locks. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_quadratic_openmp(uint ic, ulong hashkey, int *hash){
#else
void write_hash_quadratic_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*icount);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*icount);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
}
/* Thread-safe quadratic-probe insert, report level 1: adds atomic collision
 * and insert counting to write_hash_quadratic_openmp. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_quadratic_openmp_report_level_1(uint ic, ulong hashkey, int *hash){
#else
void write_hash_quadratic_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*icount);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*icount);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe quadratic-probe insert, report level 2.
 * NOTE(review): body is currently identical to report level 1. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_quadratic_openmp_report_level_2(uint ic, ulong hashkey, int *hash){
#else
void write_hash_quadratic_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*icount);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*icount);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe quadratic-probe insert, report level 3: traces every probe to
 * stdout in addition to the atomic collision/insert counting. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_quadratic_openmp_report_level_3(uint ic, ulong hashkey, int *hash){
#else
void write_hash_quadratic_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*icount);
hashloc %= hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*icount);
hashloc = hashloc%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe prime-jump insert (key-dependent stride jump = 1 + key %
 * hash_jump_prime).  CAS branch claims key slots atomically and gives up
 * after MaxTries probes; lock branch uses hand-over-hand per-slot locks. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_primejump_openmp(uint ic, ulong hashkey, int *hash){
#else
void write_hash_primejump_openmp(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint jump = 1+hashkey%hash_jump_prime;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*jump);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*jump);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
}
/* Thread-safe prime-jump insert, report level 1: adds atomic collision and
 * insert counting to write_hash_primejump_openmp. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_primejump_openmp_report_level_1(uint ic, ulong hashkey, int *hash){
#else
void write_hash_primejump_openmp_report_level_1(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint jump = 1+hashkey%hash_jump_prime;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*jump);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*jump);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe prime-jump insert, report level 2.
 * NOTE(review): body is currently identical to report level 1. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_primejump_openmp_report_level_2(uint ic, ulong hashkey, int *hash){
#else
void write_hash_primejump_openmp_report_level_2(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint jump = 1+hashkey%hash_jump_prime;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*jump);
hashloc %= hashtablesize;
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*jump);
hashloc = hashloc%hashtablesize;
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
/* Thread-safe prime-jump insert, report level 3: traces every probe to
 * stdout in addition to the atomic collision/insert counting. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void write_hash_primejump_openmp_report_level_3(uint ic, ulong hashkey, int *hash){
#else
void write_hash_primejump_openmp_report_level_3(uint ic, ulong hashkey, int *hash, omp_lock_t *lock){
#endif
int icount = 0;
uint jump = 1+hashkey%hash_jump_prime;
uint hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
int MaxTries = 1000;
int old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
//printf("old_key is %d\n",old_key);
for (icount = 1; old_key != hashkey && old_key != -1 && icount < MaxTries; icount++){
hashloc+=(icount*jump);
hashloc %= hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
old_key = __sync_val_compare_and_swap(&hash[2*hashloc], -1, hashkey);
}
if (icount < MaxTries) hash[2*hashloc+1] = ic;
#else
omp_set_lock(&(lock[hashloc]));
while (hash[2*hashloc] != -1 && hash[2*hashloc]!= (int)hashkey){
omp_unset_lock(&(lock[hashloc]));
icount++;
hashloc+=(icount*jump);
hashloc = hashloc%hashtablesize;
printf("%d: cell %d hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,ic,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
omp_set_lock(&(lock[hashloc]));
}
hash[2*hashloc] = hashkey;
hash[2*hashloc+1] = ic;
omp_unset_lock(&(lock[hashloc]));
#endif
#pragma omp atomic
write_hash_collisions += icount;;
#pragma omp atomic
hash_ncells++;
}
#endif
// Perfect hashing: the key is itself the table index, so a lookup is a
// single direct read with no collision handling.
int read_hash_perfect(ulong hashkey, int *hash){
   int hashval = hash[hashkey];
   return hashval;
}
// Linear-probe lookup: walk the table from the key's home slot until either
// the key or an empty slot (-1) is found.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_linear(ulong hashkey, int *hash){
   uint slot = (hashkey*AA+BB)%prime%hashtablesize;
   while (hash[2*slot] != (int)hashkey && hash[2*slot] != -1) {
      slot = (slot + 1) % hashtablesize;
   }
   return (hash[2*slot] != -1) ? hash[2*slot+1] : -1;
}
// Linear-probe lookup with level-1 reporting: each query and each probe past
// the home slot is folded into the global collision statistics.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_linear_report_level_1(ulong hashkey, int *hash){
   int probes = 0;
   hash_queries++;
   uint slot = (hashkey*AA+BB)%prime%hashtablesize;
   while (hash[2*slot] != (int)hashkey && hash[2*slot] != -1) {
      slot = (slot + 1) % hashtablesize;
      probes++;
   }
   read_hash_collisions += probes;
   return (hash[2*slot] != -1) ? hash[2*slot+1] : -1;
}
// Linear-probe lookup with level-2 reporting: collision statistics plus a
// hard cap on the probe-chain length to catch runaway probing.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_linear_report_level_2(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
// An empty slot (-1) terminating the chain means the key was never inserted.
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Linear-probe lookup with level-3 reporting: prints a trace line for the
// home slot and for every subsequent probe, in addition to the level-2
// collision statistics and chain-length cap.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_linear_report_level_3(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
// Trace previews the slot the loop increment is about to visit.
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Quadratic-probe lookup: from the home slot, advance by offsets of
// 1, 4, 9, ... (step^2) modulo the table size until either the key or an
// empty slot (-1) is found.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_quadratic(ulong hashkey, int *hash){
   uint slot = (hashkey*AA+BB)%prime%hashtablesize;
   int step = 0;
   while (hash[2*slot] != (int)hashkey && hash[2*slot] != -1) {
      step++;
      slot = (slot + (uint)(step*step)) % hashtablesize;
   }
   return (hash[2*slot] != -1) ? hash[2*slot+1] : -1;
}
// Quadratic-probe lookup with level-1 reporting: each query and each probe
// past the home slot is folded into the global collision statistics.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_quadratic_report_level_1(ulong hashkey, int *hash){
int hashval = -1;
uint hashloc;
int icount=0;
hash_queries++;
// Probe sequence: home slot, then offsets of 1, 4, 9, ... (icount^2),
// modulo the table size; stops at the key or an empty slot (-1).
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
}
read_hash_collisions += icount;
// An empty slot terminating the chain means the key was never inserted.
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Quadratic-probe lookup with level-2 reporting: collision statistics plus a
// hard cap on the probe-chain length to catch runaway probing.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_quadratic_report_level_2(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Quadratic-probe lookup with level-3 reporting: prints a trace line for the
// home slot and every probe, plus the level-2 statistics and chain cap.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_quadratic_report_level_3(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
// NOTE(review): the trace previews hashloc+1, but quadratic probing
// actually jumps by icount*icount next -- the printed slot does not
// match the next probe. Preserved as-is to keep output identical;
// confirm whether the trace should show the true next slot.
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Prime-jump lookup: the probe stride is derived from the key itself
// (1 + key mod hash_jump_prime), so colliding keys follow different probe
// sequences. Advances by step*stride until the key or an empty slot (-1)
// is found. Returns the stored value, or -1 when the key is absent.
int read_hash_primejump(ulong hashkey, int *hash){
   uint stride = 1+hashkey%hash_jump_prime;
   uint slot = (hashkey*AA+BB)%prime%hashtablesize;
   int step = 0;
   while (hash[2*slot] != (int)hashkey && hash[2*slot] != -1) {
      step++;
      slot = (slot + (uint)step*stride) % hashtablesize;
   }
   return (hash[2*slot] != -1) ? hash[2*slot+1] : -1;
}
// Prime-jump lookup with level-1 reporting: each query and each probe past
// the home slot is folded into the global collision statistics.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_primejump_report_level_1(ulong hashkey, int *hash){
int hashval = -1;
uint hashloc;
int icount=0;
// Key-derived stride so colliding keys take different probe sequences.
uint jump = 1+hashkey%hash_jump_prime;
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
}
read_hash_collisions += icount;
// An empty slot terminating the chain means the key was never inserted.
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Prime-jump lookup with level-2 reporting: collision statistics plus a hard
// cap on the probe-chain length to catch runaway probing.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_primejump_report_level_2(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
uint jump = 1+hashkey%hash_jump_prime;
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Prime-jump lookup with level-3 reporting: prints a trace line for the home
// slot and every probe, plus the level-2 statistics and chain cap.
// Returns the stored value for the key, or -1 when the key is absent.
int read_hash_primejump_report_level_3(ulong hashkey, int *hash){
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
uint jump = 1+hashkey%hash_jump_prime;
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
// NOTE(review): the trace previews hashloc+1, but prime-jump probing
// actually advances by icount*jump next -- the printed slot does not
// match the next probe. Preserved as-is to keep output identical;
// confirm whether the trace should show the true next slot.
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
// Exit with a failure status; the original exit(0) incorrectly
// reported success on this error path.
exit(1);
}
}
read_hash_collisions += icount;
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
// Tear down a compact hash table: clear the cached read-function pointer
// and release the table storage. hash must have been allocated by the
// matching setup routine; passing NULL is a harmless no-op for free().
void compact_hash_delete(int *hash){
read_hash = NULL;
free(hash);
}
#ifdef _OPENMP
// OpenMP variant of compact_hash_delete. When the GCC 4-byte CAS builtin is
// available the write path is lock-free and no lock array exists; otherwise
// the per-slot lock array must be passed in and destroyed here.
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
void compact_hash_delete_openmp(int *hash){
#else
void compact_hash_delete_openmp(int *hash, omp_lock_t *lock){
#endif
read_hash = NULL;
// Table storage was obtained through the genvector allocator, not malloc.
genvectorfree((void *)hash);
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
// Perfect hashing never allocated locks, so only destroy them for the
// probing methods.
if (hash_method != PERFECT_HASH){
for (uint i = 0; i<hashtablesize; i++){
omp_destroy_lock(&(lock[i]));
}
free(lock);
}
#endif
// Mark the module as torn down so a stale method is never reused.
hash_method = METHOD_UNSET;
}
#endif
// Report write-side collision statistics. Level 1 accumulates a running
// average for the final report; level 2+ prints immediately. Perfect
// hashing has no collisions, so it is skipped.
// NOTE(review): divides by hash_ncells without a zero check -- confirm this
// is never called before any cell has been written.
void write_hash_collision_report(void){
if (hash_method == PERFECT_HASH) return;
if (hash_report_level == 1) {
write_hash_collisions_runsum += (double)write_hash_collisions/(double)hash_ncells;
write_hash_collisions_count++;
} else if (hash_report_level >= 2) {
printf("Write hash collision report -- collisions per cell %lf, collisions %d cells %d\n",(double)write_hash_collisions/(double)hash_ncells,write_hash_collisions,hash_ncells);
}
}
// Report read-side collision statistics. Level 1 accumulates a running
// average for the final report; level 2+ prints immediately and resets the
// query/collision counters for the next measurement window.
// NOTE(review): the level-1 branch does not reset the counters, unlike the
// level-2 branch -- confirm that asymmetry is intentional.
void read_hash_collision_report(void){
if (hash_method == PERFECT_HASH) return;
if (hash_report_level == 1) {
read_hash_collisions_runsum += (double)read_hash_collisions/(double)hash_queries;
read_hash_collisions_count++;
} else if (hash_report_level >= 2) {
printf("Read hash collision report -- collisions per cell %lf, collisions %d cells %d\n",(double)read_hash_collisions/(double)hash_queries,read_hash_collisions,hash_queries);
hash_queries = 0;
read_hash_collisions = 0;
}
}
// Print the run-averaged write/read collision summary accumulated by the
// level-1 reporting paths.
// NOTE(review): the guard checks only read_hash_collisions_count, yet the
// message also divides by write_hash_collisions_count -- if the write count
// is zero the printed write average is meaningless; confirm both counters
// are always advanced together.
void final_hash_collision_report(void){
if (hash_report_level >= 1 && read_hash_collisions_count > 0) {
printf("Final hash collision report -- write/read collisions per cell %lf/%lf\n",write_hash_collisions_runsum/(double)write_hash_collisions_count,read_hash_collisions_runsum/(double)read_hash_collisions_count);
}
}
#ifdef HAVE_OPENCL
// Expose the embedded OpenCL kernel source so callers can compile the hash
// kernels into their own programs. The returned pointer refers to static
// data; the caller must not free or modify it.
char *get_hash_kernel_source_string(void){
return (char*) simplehash_kern_source;
}
cl_kernel init_kernel;
// Build the simplehash OpenCL program for the given context and create the
// table-initialization kernel, stored in the file-scope init_kernel.
// On build failure the device build log is fetched and printed.
void hash_lib_init(cl_context context){
   cl_int error;
   cl_program program = clCreateProgramWithSource(context, 1, (const char **)&simplehashlib_kern_source, NULL, &error);
   if (error != CL_SUCCESS){
      printf("clCreateProgramWithSource returned an error %d at line %d in file %s\n", error,__LINE__,__FILE__);
   }
   size_t nReportSize;
   char* BuildReport;
#ifdef HAVE_CL_DOUBLE
   error = clBuildProgram(program, 0, NULL, "-DHAVE_CL_DOUBLE", NULL, NULL);
#else
   error = clBuildProgram(program, 0, NULL, "-DNO_CL_DOUBLE -cl-single-precision-constant", NULL, NULL);
#endif
   if (error != CL_SUCCESS){
      printf("clBuildProgram returned an error %d at line %d in file %s\n", error,__LINE__,__FILE__);
      // Validate the context before querying it -- the original checked for
      // NULL only after already passing the context to clGetContextInfo.
      if (context == NULL){
         printf("EZCL_DEVTYPE_INIT: Failed to find device and setup context in file %s at line %d\n", __FILE__, __LINE__);
         exit(-1); // No device is available, something is wrong
      }
      cl_device_id device;
      // param_value_size is in bytes; the original passed 1, which is too
      // small to receive a cl_device_id and makes the query fail.
      clGetContextInfo(context, CL_CONTEXT_DEVICES, sizeof(cl_device_id), &device, NULL);
      error = clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &nReportSize);
      if (error != CL_SUCCESS) {
         switch (error){
            case CL_INVALID_DEVICE:
               printf("Invalid device in clProgramBuildInfo\n");
               break;
            case CL_INVALID_VALUE:
               printf("Invalid value in clProgramBuildInfo\n");
               break;
            case CL_INVALID_PROGRAM:
               printf("Invalid program in clProgramBuildInfo\n");
               break;
         }
      }
      BuildReport = (char *)malloc(nReportSize);
      // Guard the allocation and release the report buffer (the original
      // leaked it and never checked malloc).
      if (BuildReport != NULL) {
         error = clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, nReportSize, BuildReport, NULL);
         if (error != CL_SUCCESS) {
            switch (error){
               case CL_INVALID_DEVICE:
                  printf("Invalid device in clProgramBuildInfo\n");
                  break;
               case CL_INVALID_VALUE:
                  printf("Invalid value in clProgramBuildInfo\n");
                  break;
               case CL_INVALID_PROGRAM:
                  printf("Invalid program in clProgramBuildInfo\n");
                  break;
            }
         }
         printf("%s\n", BuildReport);
         free(BuildReport);
      }
   }
   init_kernel = clCreateKernel(program, "init_kern", &error);
   if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__);
}
// Allocate a device hash-table buffer of hash_size ints and, when do_init
// is set, launch init_kernel to fill it with -1 (empty-slot sentinel),
// returning the kernel's profiled execution time through gpu_time.
// The queue must have profiling enabled for the timing query to succeed.
cl_mem hash_init (int hash_size, int TILE_SIZE, int do_init, cl_context context, cl_command_queue queue, long *gpu_time)
{
   cl_int error;
   long gpu_time_start, gpu_time_end;
   // Report zero time when no init kernel runs; the original left
   // *gpu_time unassigned in that case, so callers read garbage.
   *gpu_time = 0;
   cl_mem hash_buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, hash_size*sizeof(int), NULL, &error);
   if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__);
   //init to -1
   if (do_init) {
      error = clSetKernelArg(init_kernel, 0, sizeof(cl_uint), &hash_size);
      if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__);
      error = clSetKernelArg(init_kernel, 1, sizeof(cl_mem), (void*)&hash_buffer);
      if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__);
      size_t global_work_size;
      size_t local_work_size;
      local_work_size = TILE_SIZE;
      // Round the global size up to a whole number of work-groups.
      global_work_size = ((hash_size+local_work_size-1)/local_work_size)*local_work_size;
      cl_event hash_init_event;
      error = clEnqueueNDRangeKernel(queue, init_kernel, 1, 0, &global_work_size, &local_work_size, 0, NULL, &hash_init_event);
      if (error != CL_SUCCESS) printf("Error is %d at line %d\n",error,__LINE__);
      clWaitForEvents(1,&hash_init_event);
      clGetEventProfilingInfo(hash_init_event, CL_PROFILING_COMMAND_START, sizeof(gpu_time_start), &gpu_time_start, NULL);
      clGetEventProfilingInfo(hash_init_event, CL_PROFILING_COMMAND_END, sizeof(gpu_time_end), &gpu_time_end, NULL);
      *gpu_time = gpu_time_end - gpu_time_start;
      clReleaseEvent(hash_init_event);
   }
   //if (DETAILED_TIMING) printf("\n\tinit %.6lf,", (double)(gpu_time_end - gpu_time_start)*1.0e-9);
   return(hash_buffer);
}
#endif
// Look up hashkey in a device-built hash table, dispatching on the probing
// strategy (hash_method) and verbosity (file-scope hash_report_level).
// Returns the stored value for the key, or -1 when the key is absent.
// NOTE(review): hashtablesize/AA/BB parameters shadow the file-scope globals
// of the same names; prime, hash_stride and the collision counters are still
// read from globals -- confirm callers pass values consistent with how the
// table was written.
int read_dev_hash(int hash_method, ulong hashtablesize, ulong AA, ulong BB, ulong hashkey, int *hash){
//int hash_report_level = 3;
int max_collisions_allowed = 1000;
int hashval = -1;
uint hashloc;
int icount=0;
// Perfect hashing: key is the table index, no probing required.
if (hash_method == PERFECT_HASH) {
return(hash[hashkey]);
}
// Linear probing: advance one slot per collision.
if (hash_method == LINEAR) {
if (hash_report_level == 0) {
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
}
} else if (hash_report_level == 1) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
}
read_hash_collisions += icount;
} else if (hash_report_level == 2) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else if (hash_report_level == 3) {
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc++,hashloc = hashloc%hashtablesize){
icount++;
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else {
printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level);
exit(1);
}
// Quadratic probing: advance by icount^2 per collision.
} else if (hash_method == QUADRATIC) {
if (hash_report_level == 0) {
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
}
} else if (hash_report_level == 1) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
}
read_hash_collisions += icount;
} else if (hash_report_level == 2) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else if (hash_report_level == 3) {
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*icount),hashloc = hashloc%hashtablesize){
icount++;
// NOTE(review): trace previews hashloc+1, not the true next probe.
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else {
printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level);
exit(1);
}
// Prime-jump probing: stride derived from the key (1 + key mod prime).
} else if (hash_method == PRIME_JUMP) {
uint jump = 1+hashkey%hash_jump_prime;
if (hash_report_level == 0) {
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
}
} else if (hash_report_level == 1) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
}
read_hash_collisions += icount;
} else if (hash_report_level == 2) {
hash_queries++;
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else if (hash_report_level == 3) {
hash_queries++;
hashloc = (hashkey*AA+BB)%prime%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloc,hash[2*hashloc],hashkey,hashkey%hash_stride,hashkey/hash_stride);
for (hashloc = (hashkey*AA+BB)%prime%hashtablesize; hash[2*hashloc] != (int)hashkey && hash[2*hashloc] != -1; hashloc+=(icount*jump),hashloc = hashloc%hashtablesize){
icount++;
// NOTE(review): trace previews hashloc+1, not the true next probe.
uint hashloctmp = hashloc+1;
hashloctmp = hashloctmp%hashtablesize;
printf("%d: hashloc is %d hash[2*hashloc] = %d hashkey %lu ii %lu jj %lu\n",icount,hashloctmp,hash[2*hashloctmp],hashkey,hashkey%hash_stride,hashkey/hash_stride);
if (icount > max_collisions_allowed) {
printf("Error -- too many read hash collisions\n");
exit(0);
}
}
read_hash_collisions += icount;
} else {
printf("Error -- Illegal value of hash_report_level %d\n",hash_report_level);
exit(1);
}
} else {
printf("Error -- Illegal value of hash_method %d\n",hash_method);
exit(1);
}
// Chain ended on the key (slot holds it) or on an empty slot (-1 = absent).
if (hash[2*hashloc] != -1) hashval = hash[2*hashloc+1];
return(hashval);
}
|
embedded_skin_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Ruben Zorrilla
//
#if !defined(KRATOS_GENERATE_EMBEDDED_SKIN_UTILITY_H_INCLUDED )
#define KRATOS_GENERATE_EMBEDDED_SKIN_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "geometries/geometry_data.h"
#include "modified_shape_functions/modified_shape_functions.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/divide_geometry.h"
#include "utilities/math_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Utility to compute the skin representation from a distance function.
/** Provided either a continuous or discontinuous distance function, this
* utility reconstructs the skin representation coming from such distance
* function. This is done by computing the element intersections and saving
* them in an empty provided model part. Note that such skin representation
* is discontinuous even for a provided continuous distance field.
*/
template<std::size_t TDim>
class KRATOS_API(KRATOS_CORE) EmbeddedSkinUtility
{
public:
///@name Type Definitions
///@{
/// Pointer definition of EmbeddedSkinUtility
KRATOS_CLASS_POINTER_DEFINITION(EmbeddedSkinUtility);
// Maps each generated skin node to the (parent element, intersected edge
// index) pair it originated from; used to interpolate mesh variables back
// onto the skin.
typedef std::unordered_map<
Node<3>::Pointer,
std::tuple< const Element::Pointer, const unsigned int >,
SharedPointerHasher<Node<3>::Pointer>,
SharedPointerComparator<Node<3>::Pointer> > EdgeNodesMapType;
///@}
///@name Enum's
///@{
enum LevelSetTypeEnum
{
Continuous = 1,
Discontinuous = 2
};
///@}
///@name Life Cycle
///@{
/// Default constructor.
// Any LevelSetType string other than "continuous" selects Discontinuous.
EmbeddedSkinUtility(
ModelPart &rModelPart,
ModelPart &rSkinModelPart,
const std::string LevelSetType = "continuous",
const std::vector<std::string> InterpolatedSkinVariables = {}) :
mrModelPart(rModelPart),
mrSkinModelPart(rSkinModelPart),
mLevelSetType(LevelSetType == "continuous" ? Continuous : Discontinuous),
mrConditionPrototype(KratosComponents<Condition>::Get(this->GetConditionType())),
mInterpolatedSkinVariables(InterpolatedSkinVariables) {};
/// Destructor.
virtual ~EmbeddedSkinUtility() = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Call to generate the embedded skin model part
 * This method collects all the operations required to generate
 * the embedded skin model part. The new geometries will be stored
 * inside the skin model part provided in the constructor.
 */
void GenerateSkin();
/**
 * @brief InterpolateMeshVariableToSkin double specialization
 * Double type specialization of the InterpolateMeshVariableToSkin method
 * @param rMeshVariable background mesh origin variable
 * @param rSkinVariable skin mesh destination variable
 */
void InterpolateMeshVariableToSkin(
const Variable<double> &rMeshVariable,
const Variable<double> &rSkinVariable);
/**
 * @brief InterpolateMeshVariableToSkin array specialization
 * Array type specialization of the InterpolateMeshVariableToSkin method
 * @param rMeshVariable background mesh origin variable
 * @param rSkinVariable skin mesh destination variable
 */
void InterpolateMeshVariableToSkin(
const Variable<array_1d<double,3>> &rMeshVariable,
const Variable<array_1d<double,3>> &rSkinVariable);
/**
 * @brief Discontinuous InterpolateMeshVariableToSkin double specialization
 * Double type specialization of the InterpolateMeshVariableToSkin method
 * for discontinuous level set type formulation
 * @param rMeshVariable background mesh origin variable
 * @param rSkinVariable skin mesh destination variable
 * @param rInterfaceSide interface side ("positive" or "negative")
 */
void InterpolateDiscontinuousMeshVariableToSkin(
const Variable<double> &rMeshVariable,
const Variable<double> &rSkinVariable,
const std::string &rInterfaceSide);
/**
 * @brief Discontinuous InterpolateMeshVariableToSkin array specialization
 * Array type specialization of the InterpolateMeshVariableToSkin method
 * for discontinuous level set type formulation
 * @param rMeshVariable background mesh origin variable
 * @param rSkinVariable skin mesh destination variable
 * @param rInterfaceSide interface side ("positive" or "negative")
 */
void InterpolateDiscontinuousMeshVariableToSkin(
const Variable<array_1d<double,3>> &rMeshVariable,
const Variable<array_1d<double,3>> &rSkinVariable,
const std::string &rInterfaceSide);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart &mrModelPart;
ModelPart &mrSkinModelPart;
// Filled while generating the skin; consumed by the interpolation methods.
EdgeNodesMapType mEdgeNodesMap;
const LevelSetTypeEnum mLevelSetType;
const Condition &mrConditionPrototype;
std::vector<std::string> mInterpolatedSkinVariables;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Geometry clear operation
 * This method is called before any construction of the skin.
 * It removes the existent nodes, elements and conditions in
 * the provided skin model part.
 */
void Clear();
/**
 * @brief Computes the skin entities of one element
 * For an intersected element, this method computes the skin
 * intersection geometries.
 * @param pElement element of interest
 * @param rNodalDistances vector containing the element node distances
 * @param rTempNodeId temporal id for the new nodes
 * @param rTempCondId temporal id for the new conditions
 * @param pCondProp pointer to the properties for the new skin conditions
 * @param rNewNodesVect vector to temporary store the new skin nodes
 * @param rNewCondsVect vector to temporary store the new skin conditions
 */
void ComputeElementSkin(
const Element::Pointer pElement,
const Vector &rNodalDistances,
unsigned int &rTempNodeId,
unsigned int &rTempCondId,
Properties::Pointer pCondProp,
ModelPart::NodesContainerType &rNewNodesVect,
ModelPart::ConditionsContainerType &rNewCondsVect);
/**
 * @brief Checks if an element is split
 * This method checks if an element geometry is split
 * @param rGeometry geometry of the element of interest
 * @param rNodalDistances vector containing the element node distances
 * @return true if the element is split
 * @return false if the element is not split
 */
bool inline ElementIsSplit(
const Geometry<Node<3>> &rGeometry,
const Vector &rNodalDistances);
/**
 * @brief InterpolateMeshVariableToSkin specialization method
 * For a provided set of variables, this method interpolates the values
 * from the brackground fluid mesh to an embedded skin mesh. This can
 * be done for either the positive or negative sides of the interface.
 * @tparam TVarType variable type of the variable to be interpolated
 * @param rMeshVariable variable in the background mesh to interpolate from
 * @param rSkinVariable variable in the skin model part to interpolate to
 * @param rInterfaceSide interface side in where the shape functions
 * are to be computed. Must be either "positive" or "negative"
 */
template<class TVarType>
void InterpolateMeshVariableToSkinSpecialization(
const Variable<TVarType> &rMeshVariable,
const Variable<TVarType> &rSkinVariable,
const std::string &rInterfaceSide = "positive")
{
// Check requested variables
KRATOS_ERROR_IF((mrModelPart.NodesBegin())->SolutionStepsDataHas(rMeshVariable) == false)
<< "Mesh model part solution step data missing variable: " << rMeshVariable << std::endl;
KRATOS_ERROR_IF((mrSkinModelPart.NodesBegin())->SolutionStepsDataHas(rSkinVariable) == false)
<< "Generated skin model part solution step data missing variable: " << rSkinVariable << std::endl;
// Check that the mesh model part has elements
KRATOS_ERROR_IF(mrModelPart.NumberOfElements() == 0) << "Mesh model part has no elements.";
// Loop the edge intersection nodes to set their values
// i_edge and p_elem are declared outside the loop so they can be listed
// in the OpenMP private clause; each iteration fully reassigns them.
unsigned int i_edge;
Element::Pointer p_elem;
#pragma omp parallel for private (i_edge, p_elem)
for (int i_node = 0; i_node < static_cast<int>(mrSkinModelPart.NumberOfNodes()); ++i_node) {
// Get the current node
auto it_node = mrSkinModelPart.NodesBegin() + i_node;
// NOTE(review): constructing a Pointer from the raw address relies on
// Kratos' intrusive pointer semantics -- confirm Node<3>::Pointer is
// intrusive here (a shared_ptr built this way would double-delete).
Node<3>::Pointer p_node = &(*it_node);
// Search for the current node in the intersected edges map
const auto i_node_info = mEdgeNodesMap.find(p_node);
if (i_node_info != mEdgeNodesMap.end()){
// Get the cut node info from the map tuple iterator
std::tie(p_elem, i_edge) = std::get<1>(*i_node_info);
// Set the modified shape functions for the parent element
const auto p_elem_geom = p_elem->pGetGeometry();
const auto elem_dist = this->SetDistancesVector(*p_elem);
const auto p_mod_sh_func = pCreateModifiedShapeFunctions(p_elem_geom, elem_dist);
// Get interface modified shape function values
const auto edge_sh_func = this->GetModifiedShapeFunctionsValuesOnEdge(
p_mod_sh_func,
rInterfaceSide);
// Compute the interpolation
const auto edge_N = row(edge_sh_func, i_edge);
const auto &r_elem_geom = p_elem->GetGeometry();
auto &r_value = it_node->FastGetSolutionStepValue(rSkinVariable);
r_value = rSkinVariable.Zero();
for (unsigned int i_elem_node = 0; i_elem_node < r_elem_geom.PointsNumber(); ++i_elem_node) {
r_value += edge_N[i_elem_node] * r_elem_geom[i_elem_node].FastGetSolutionStepValue(rMeshVariable);
}
} else{
KRATOS_ERROR << "Intersected edge node " << it_node->Id() << " not found in intersected edges nodes map" << std::endl;
}
}
};
/**
 * @brief Renumber and saves the new skin entities
 * This method renumbers the new skin geometrical entities (MPI compatible)
 * and add them to the provided skin model part.
 * @param rNewNodesVect vector that stores the new skin nodes
 * @param rNewCondsVect vector that stores the new skin conditions
 */
void RenumberAndAddSkinEntities(
const ModelPart::NodesContainerType &rNewNodesVect,
const ModelPart::ConditionsContainerType &rNewCondsVect);
/**
 * @brief Set the Distances Vector object
 * For a given element, this method sets the vector containing the
 * element node distances.
 * @param ItElem iterator to the element of interest
 * @return const Vector vector containing the element node distances
 */
const Vector SetDistancesVector(const Element &rElement);
/**
 * Sets the the divide geometry utility according to the geometry type.
 * @param pGeometry Pointer to the element geometry
 * @param rNodalDistances Vector containing the distance values
 * @return A pointer to the divide geometry utility
 */
DivideGeometry::Pointer SetDivideGeometryUtility(
const Geometry<Node<3>> &rGeometry,
const Vector &rNodalDistances);
/**
 * Creates the new interface condition geometry
 * @param rOriginGeometryType Interface subgeometry type
 * @param rNewNodesArray Nodes that conform the new interface geometry
 * @return A pointer to the new geometry
 */
Geometry< Node<3> >::Pointer pCreateNewConditionGeometry(
const GeometryData::KratosGeometryType &rOriginGeometryType,
const Condition::NodesArrayType &rNewNodesArray);
/**
 * @brief Creates a pointer to a new skin condition
 * From the split pattern of an intersected element, this method
 * creates and returns a pointer to a new skin condition.
 * @param rOriginGeometryType GeometryType of the condition to be created
 * @param rNewNodesArray array containing pointers to the nodes that will
 * conform the condition
 * @param rConditionId new condition identifier
 * @param pConditionProperties pointer to the new condition properties
 * @return Condition::Pointer pointer to a new skin condition
 */
Condition::Pointer pCreateNewCondition(
const GeometryData::KratosGeometryType &rOriginGeometryType,
const Condition::NodesArrayType &rNewNodesArray,
const unsigned int &rConditionId,
const Properties::Pointer pConditionProperties);
/**
 * @brief Get the condition type
 * Depending on the dimension template argument, this method returns
 * the condition type name (Condition2D2N or SurfaceCondition3D3N)
 * @return std::sting condition type name
 */
static const std::string GetConditionType();
/**
 * @brief Set the Skin Entities Properties
 * This method checks which is the last properties id. and
 * sets a new one accordingly to be used as skin conditions property
 * @return Properties::Pointer pointer to the new skin entities property
 */
Properties::Pointer SetSkinEntitiesProperties();
/**
 * @brief Creates a pointer to the Modified Shape Functions
 * For an intersected element, sets the modified shape functions utility
 * @param pGeometry Pointer to the intersected element geometry
 * @param rNodalDistances Vector containing the nodal distances
 * @return ModifiedShapeFunctions::UniquePointer Unique pointer
 * to the current element modified shape functions utility
 */
ModifiedShapeFunctions::UniquePointer pCreateModifiedShapeFunctions(
const Geometry<Node<3>>::Pointer pGeometry,
const Vector& rNodalDistances);
/**
 * @brief Get the Modified Shape Functions Values object
 * This method returns the shape function values in an element intersection
 * @param rpModifiedShapeFunctions pointer to the modified shape functions util
 * @param rInterfaceSide interface side in where the shape functions
 * are to be computed. Must be either "positive" or "negative"
 * @return Matrix matrix containing the split shape function values
 */
Matrix GetModifiedShapeFunctionsValues(
const ModifiedShapeFunctions::UniquePointer &rpModifiedShapeFunctions,
const std::string &rInterfaceSide) const;
/**
 * @brief Get the Modified Shape Functions Values On Edge object
 * This method returns the shape function values in the intersected edges
 * @param rpModifiedShapeFunctions pointer to the modified shape functions util
 * @param rInterfaceSide interface side in where the shape functions
 * are to be computed. Must be either "positive" or "negative"
 * @return Matrix matrix containing the split shape function values
 */
Matrix GetModifiedShapeFunctionsValuesOnEdge(
const ModifiedShapeFunctions::UniquePointer &rpModifiedShapeFunctions,
const std::string &rInterfaceSide) const;
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
EmbeddedSkinUtility& operator=(EmbeddedSkinUtility const& rOther) = delete;
/// Copy constructor.
EmbeddedSkinUtility(EmbeddedSkinUtility const& rOther) = delete;
///@}
}; // Class EmbeddedSkinUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos.
#endif // KRATOS_GENERATE_EMBEDDED_SKIN_UTILITY_H_INCLUDED defined
|
GB_unop__identity_fc64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_int64
// op(A') function: GB_unop_tran__identity_fc64_int64
// C type: GxB_FC64_t
// A type: int64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Purpose: Cx = op (cast (Ax)) where op is the identity operator and the cast
// converts int64_t to double complex (GxB_FC64_t) with zero imaginary part.
// Auto-generated kernel; if this file is in Generated/, edit the template,
// not this file.
//
//   Cx       [out] result array; may alias Ax (noted below)
//   Ax       [in]  input values
//   Ab       [in]  A->b bitmap, or NULL when A is not bitmap; entries are
//                  applied only where Ab [p] is nonzero
//   anz      [in]  number of entries to process
//   nthreads [in]  number of OpenMP threads
//
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this kernel is disabled at
// compile time (GB_DISABLE) so the caller falls back to the generic case.
GrB_Info GB_unop_apply__identity_fc64_int64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/dense case: apply the operator to all anz entries
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a single parallel memcpy suffices
// (this branch is compiled out here since the macro is 0: the
// int64 -> FC64 typecast is required)
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Purpose: C = op (cast (A')): transpose A, typecast int64_t -> GxB_FC64_t,
// and apply the identity operator.  The loop body lives in the shared
// template GB_unop_transpose.c, specialized by the GB_* macros defined above.
//
//   C           [out] result matrix
//   A           [in]  input matrix (logically transposed by the template)
//   Workspaces  [in]  per-thread workspaces for the bucket transpose
//   A_slice     [in]  how A is partitioned across tasks
//   nworkspaces [in]  number of workspaces
//   nthreads    [in]  number of OpenMP threads
GrB_Info GB_unop_tran__identity_fc64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_bool
// op(A') function: GB_tran__lnot_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Purpose: Cx [p] = ! (Ax [p] != 0) for all p in 0..anz-1, with the input
// typecast bool -> uint64_t, parallelized over nthreads OpenMP threads.
// Auto-generated kernel; returns GrB_NO_VALUE when compiled out (GB_DISABLE)
// so the caller can fall back to the generic operator.
GrB_Info GB_unop__lnot_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = lnot (cast (Ax [p])), via the GB_CAST_OP macro defined above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Purpose: C = lnot (cast (A')): transpose, typecast bool -> uint64_t, and
// apply the logical-not operator.  The loop body is the shared template
// GB_unaryop_transpose.c (phase 2), specialized by the macros defined above.
//
//   C         [out] result matrix
//   A         [in]  input matrix, transposed by the template
//   Rowcounts [in]  per-slice row counts from phase 1
//   Iter      [in]  iterator over the vectors of A
//   A_slice   [in]  how A is partitioned across the naslice tasks
//   naslice   [in]  number of slices of A
GrB_Info GB_tran__lnot_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
optim10_matmul.c | #include <gemm.h>
#define A(i,j) a[(j)*lda+(i)]
#define B(i,j) b[(j)*ldb+(i)]
#define C(i,j) c[(j)*ldc+(i)]
/* Fill the leading m-by-n part of the column-major matrix a (leading
 * dimension lda) with the running sequence 1, 2, 3, ... in column order. */
void serial_init(int m, int n, double * a, int lda){
    int next = 1;
    for (int col = 0; col < n; col++) {
        for (int row = 0; row < m; row++)
            a[col * lda + row] = next++;
    }
}
/* Fill the leading m-by-n part of the column-major matrix a (leading
 * dimension lda) with uniform pseudo-random values in [-1, 1) drawn from
 * drand48(), column by column.  The stream is seeded by the caller. */
void random_init(int m, int n, double * a, int lda){
    for (int col = 0; col < n; col++) {
        double *column = a + col * lda;
        for (int row = 0; row < m; row++)
            column[row] = 2.0 * drand48() - 1.0;
    }
}
/* Dump an m-by-n column-major matrix (stored with leading dimension m) to
 * stdout.  Each output line holds one COLUMN of the matrix, so the printout
 * is the transpose of the usual row-wise view. */
void display(double * matrix, int m, int n){
    for (int col = 0; col < n; col++) {
        const double *start = matrix + col * m;
        for (int row = 0; row < m; row++)
            printf("%f ", start[row]);
        putchar('\n');
    }
}
/* Compute the 4x4 block update C(0:3,0:3) += A(0:3,:) * B(:,0:3) over k
 * inner-product terms.  All matrices are column-major with leading
 * dimensions lda, ldb, ldc.  The 16 partial sums are kept in local
 * accumulators for the whole k-loop and written back to C once at the end,
 * exactly as in the register-blocked original: each accumulator sums the
 * same products in the same p-order, so results are bit-identical. */
void AddDot4x4(int k, double *a, int lda, double *b, int ldb, double *c, int ldc){
    double acc[4][4] = {{0.0}};        /* acc[row][col] accumulates C(row,col) */
    const double *bcol0 = &b[0 * ldb]; /* pointers walk down the 4 columns of B */
    const double *bcol1 = &b[1 * ldb];
    const double *bcol2 = &b[2 * ldb];
    const double *bcol3 = &b[3 * ldb];
    for (int p = 0; p < k; p++) {
        double arow[4];
        for (int r = 0; r < 4; r++)
            arow[r] = a[p * lda + r];  /* A(r, p) */
        double b0 = bcol0[p];
        double b1 = bcol1[p];
        double b2 = bcol2[p];
        double b3 = bcol3[p];
        for (int r = 0; r < 4; r++) {
            acc[r][0] += arow[r] * b0;
            acc[r][1] += arow[r] * b1;
            acc[r][2] += arow[r] * b2;
            acc[r][3] += arow[r] * b3;
        }
    }
    /* Single write-back of the accumulated block into C. */
    for (int col = 0; col < 4; col++)
        for (int row = 0; row < 4; row++)
            c[col * ldc + row] += acc[row][col];
}
void matmul(int m, int n, int k, double * a, int lda, double * b, int ldb, double * c, int ldc){
/*
Computes the matrix multiplication of A and B and stores in C.
C = A*B + C
Arguments
---------
m,n,k : Specifies matrix dimensions
a : pointer to first matrix (column-major, m x k)
b : pointer to second matrix (column-major, k x n)
c : pointer to the resultant matrix (column-major, m x n)
lda : leading dimension of matrix a
ldb : leading dimension of matrix b
ldc : leading dimension of matrix c
Return
------
None
Full 4x4 tiles are computed by the optimized AddDot4x4() kernel.
BUG FIX: the original strode both loops by 4 unconditionally, so when m
or n was not a multiple of 4 the kernel read and wrote out of bounds.
Leftover rows/columns are now handled by scalar fringe loops.
*/
if(a==NULL || b==NULL || c==NULL){
printf("Argument Error : One of the input arguments to matmul() was NULL\n");
return;
}
int m4 = m - (m % 4); /* largest multiples of 4 not exceeding m and n */
int n4 = n - (n % 4);
for(int j=0; j<n4; j+=4){ /* loop over the columns of C with stride 4 */
for(int i=0; i<m4; i+=4){ /* loop over the rows of C with stride 4 */
AddDot4x4(k,&a[i],lda,&b[j*ldb],ldb,&c[j*ldc+i],ldc); /* &A(i,0), &B(0,j), &C(i,j) */
}
}
/* Fringe: remaining columns j = n4..n-1, all rows. */
for(int j=n4; j<n; j++){
for(int i=0; i<m; i++){
double sum = 0.0;
for(int p=0; p<k; p++)
sum += a[p*lda+i] * b[j*ldb+p];
c[j*ldc+i] += sum;
}
}
/* Fringe: remaining rows i = m4..m-1 within the tiled columns. */
for(int j=0; j<n4; j++){
for(int i=m4; i<m; i++){
double sum = 0.0;
for(int p=0; p<k; p++)
sum += a[p*lda+i] * b[j*ldb+p];
c[j*ldc+i] += sum;
}
}
return;
}
int main(){
/* Benchmark driver: times C = A*B + C for m = n = k = 2000 (column-major)
and reports GFLOPS.  Matrices small enough to print (m <= 8) are dumped. */
int m = 2000;
int n = 2000;
int k = 2000;
double * A = (double*)calloc(m*k,sizeof(double));
double * B = (double*)calloc(k*n,sizeof(double));
double * C = (double*)calloc(m*n,sizeof(double));
struct timeval start,finish;
double gflops = 2.0 * m*n*k * 1.0e-09; /* total flops, in units of 1e9 */
/* BUG FIX: random_init() draws from drand48(), whose stream is seeded by
srand48(), not srand(); the original srand() call left drand48() on its
fixed default seed, so every run multiplied the same matrices. */
srand48((long)time(NULL));
if(A==NULL || B==NULL || C==NULL){
printf("Out of Memory!\n");
exit(EXIT_FAILURE);
}
random_init(m,k,A,m);
random_init(k,n,B,k);
gettimeofday(&start, NULL);
matmul(m,n,k,A,m,B,k,C,m);
gettimeofday(&finish, NULL);
double duration = ((double)(finish.tv_sec-start.tv_sec)*1000000 + (double)(finish.tv_usec-start.tv_usec)) / 1000000;
if(m<=8){
printf("Matrix A : \n");
display(A,m,k);
printf("Matrix B : \n");
display(B,k,n);
printf("Dot Product Result : \n");
display(C,m,n);
}
printf("Optimization 10 : Dot product took %f seconds GFLOPS : %f\n",duration,gflops/duration);
/* Release the matrices; good hygiene even though exit would reclaim them. */
free(A);
free(B);
free(C);
return 0;
} |
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
  ExtentPacket: one peak (or valley) interval on a single color axis of the
  histogram.  left/right are inclusive bounds in 0..255, center accumulates
  (then holds) the mean color of the pixels classified into the interval,
  and index is the scan cursor that DefineRegion() resumes from.
*/
typedef struct _ExtentPacket
{
double
center;
ssize_t
index,
left,
right;
} ExtentPacket;
/*
  Cluster: a candidate class, the Cartesian product of one red, one green,
  and one blue ExtentPacket.  count is the number of pixels that fell inside
  the (SafeMargin-padded) box and id the final colormap index.  Clusters
  form a singly-linked list built and consumed by Classify().
*/
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
/*
  IntervalTree: node of the scale-space ''fingerprint'' tree of zero
  crossings described in the file header; tau is the smoothing scale and
  [left, right] the histogram interval the node covers.
*/
typedef struct _IntervalTree
{
double
tau;
ssize_t
left,
right;
double
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/*
  ZeroCrossing: the histogram smoothed at scale tau together with the sign
  changes (crossings) of its second derivative.
*/
typedef struct _ZeroCrossing
{
double
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters: enumerate every (red, green, blue) triple of peak regions
found in the per-channel histograms; each triple becomes one candidate
cluster in a singly-linked list headed by 'head'.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) ResetMagickMemory(&red,0,sizeof(red));
(void) ResetMagickMemory(&green,0,sizeof(green));
(void) ResetMagickMemory(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
/* NOTE(review): on allocation failure the clusters built so far are
leaked; the exception macro returns MagickFalse immediately. */
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster: a pixel is credited to the FIRST
cluster whose RGB box (padded by SafeMargin on every side) contains it,
and its components are accumulated into the cluster's running centers.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
NOTE(review): count is reset here and reused as the id of the next
surviving cluster, so the threshold below scales with the number of
clusters kept so far rather than with the total pixel count computed
above -- confirm this matches the upstream intent.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster: finalize its mean color and assign its id.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations: 'squares' is biased by +255 so it can be
indexed directly by a signed byte difference in [-255, 255].
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap: one entry per surviving cluster, set to the
cluster's mean color rounded to the nearest char value.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse grain classes: threshold each pixel against the cluster boxes;
any pixel no box claims falls through to the fuzzy c-means membership
computation below.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,0,q);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) cluster->id,q);
break;
}
}
if (cluster == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership: assign the pixel to the colormap entry
whose membership value 1/sum is maximal.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
/* NOTE(review): if the pixel coincides exactly with colormap
entry k, distance_squared is 0 and this divides by zero --
confirm whether the thresholding pass above makes that
unreachable. */
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.  NOTE(review): 'status' is accumulated above but
the function unconditionally returns MagickTrue -- confirm intent.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
/*
Enforce the zero-crossing consistency property between adjacent scales:
each crossing at the coarser scale i is moved (or dropped) so that an even
number of finer-scale (i+1) crossings lies between retained crossings.
NOTE(review): zero_crossing[i+1] is read with i starting at
number_crossings-1, so the caller must allocate number_crossings+1
elements -- confirm against the caller's allocation.
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
register ssize_t
i,
j,
k,
l;
ssize_t
center,
correct,
count,
left,
right;
/*
Consolidate zero crossings, coarsest scale first.
*/
for (i=(ssize_t) number_crossings-1; i >= 0; i--)
for (j=0; j <= 255; j++)
{
if (zero_crossing[i].crossings[j] == 0)
continue;
/*
Find the entry that is closest to j and still preserves the
property that there are an even number of crossings between
intervals.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i+1].crossings[k] != 0)
break;
left=MagickMax(k,0);
center=j;
for (k=j+1; k < 255; k++)
if (zero_crossing[i+1].crossings[k] != 0)
break;
right=MagickMin(k,255);
/*
K is the zero crossing just left of j.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i].crossings[k] != 0)
break;
/* k can only be negative here when j == 0 (loop body never ran) */
if (k < 0)
k=0;
/*
Check center for an even number of crossings between k and j.
*/
correct=(-1);
if (zero_crossing[i+1].crossings[j] != 0)
{
count=0;
for (l=k+1; l < center; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (center != k))
correct=center;
}
/*
Check left for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < left; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (left != k))
correct=left;
}
/*
Check right for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < right; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (right != k))
correct=right;
}
/*
Move the crossing to the chosen candidate position, or drop it if
none of center/left/right preserves the even-count property.
*/
l=(ssize_t) zero_crossing[i].crossings[j];
zero_crossing[i].crossings[j]=0;
if (correct != -1)
zero_crossing[i].crossings[correct]=(short) l;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
/*
  Scan the extrema table starting at extents->index and fill in the
  left/right boundaries of the next peak region: the region starts at the
  first positive extremum and ends just before the first negative one.
  Returns MagickTrue when a region was found, MagickFalse when the table
  is exhausted.  extents->index is advanced so repeated calls enumerate
  all regions.
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
ssize_t
position;
/*
Reset the extent to cover the whole histogram by default.
*/
extents->left=0;
extents->center=0.0;
extents->right=255;
/*
Advance to the first positive extremum (start of a peak).
*/
position=extents->index;
while ((position <= 255) && (extrema[position] <= 0))
position++;
extents->index=position;
if (position > 255)
return(MagickFalse);  /* no left side - no region exists */
extents->left=position;
/*
Advance to the first negative extremum (end of the peak).
*/
while ((position <= 255) && (extrema[position] >= 0))
position++;
extents->index=position;
extents->right=position-1;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
/*
  Differentiate a 256-bin histogram into 'derivative': interior bins use
  central differencing, while the two endpoints use a second-order
  (three-point) one-sided formula so no out-of-range bin is read.
*/
static void DerivativeHistogram(const double *histogram,
double *derivative)
{
ssize_t
bin;
/*
One-sided three-point estimates at the two boundaries.
*/
derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
/*
Central differences everywhere else.
*/
for (bin=1; bin <= 254; bin++)
derivative[bin]=(histogram[bin+1]-histogram[bin-1])/2.0;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Unwind the channels allocated so far, then report the failure.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and smooth each channel to find its extrema.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one for every combination of peak regions in the three
    channel extrema arrays.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Fix: release the partially built cluster list and the
              histogram/extrema buffers; they were previously leaked on
              this early return.
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Fix: release the histogram/extrema buffers (previously leaked).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose RGB extents (widened by SafeMargin) contain it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold; `count' is
    reused here as the number of retained clusters.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums into mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as background.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The threshold for each channel is midway between the background and
        object cluster centers.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  /*
    Accumulate 256-bin red/green/blue histograms over every pixel of the
    image; stops early if a pixel row cannot be read.
  */
  register const Quantum
    *q;

  register ssize_t
    bin,
    column;

  ssize_t
    row;

  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,q))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))]++;
      q+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the tree to list, visiting
    siblings before children.
  */
  if (node != (IntervalTree *) NULL)
    {
      if (node->child == (IntervalTree *) NULL)
        {
          list[*number_nodes]=node;
          (*number_nodes)++;
        }
      InitializeList(list,number_nodes,node->sibling);
      InitializeList(list,number_nodes,node->child);
    }
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set each node's mean_stability to the arithmetic mean of its children's
    stability (0.0 for a childless node); recurses over the whole tree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *scan;

      register ssize_t
        children;

      register double
        total;

      total=0.0;
      children=0;
      for (scan=node->child; scan != (IntervalTree *) NULL; scan=scan->sibling)
      {
        total+=scan->stability;
        children++;
      }
      node->mean_stability=total/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the difference between its tau and its first
    child's tau; childless nodes get 0.0.  Recurses over the whole tree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      /*
        Fix: the root allocation was previously unchecked, dereferencing a
        NULL pointer on failure and leaking `list'.
      */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each leaf interval is subdivided at the zero crossings of
      the next (finer) scale.
      NOTE(review): the node allocations below are still unchecked; an OOM
      here would dereference NULL -- confirm whether AcquireMagickMemory can
      fail in this configuration before hardening further.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      if (left != head->left)
        {
          /*
            Close the final subinterval up to the parent's right edge.
          */
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect "active" nodes: a node whose stability is at least the mean
    stability of its children is appended to list and its subtree is pruned;
    otherwise both its siblings and children are examined.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Post-order release of an interval (sub)tree: siblings and children are
    freed before the node itself.
  */
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      (void) RelinquishMagickMemory(node);
    }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    original (unsmoothed) histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      /*
        Fix: release the interval-tree list (previously leaked here).
      */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list.  A derivative-buffer failure is fatal
    (ThrowFatalException does not return), so no cleanup is attempted there.
  */
  derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(double *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (double *) NULL) ||
      (second_derivative == (double *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      /*
        Fix: release the work buffers (previously leaked on this path).
      */
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a -1 crossing at the node's right edge
      marks a peak (search for the maximum), otherwise a valley (minimum).
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  if (number_nodes != 0)
    average_tau/=(double) number_nodes;  /* fix: avoid 0/0 => NaN */
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  /*
    Convolve the 256-bin histogram with a Gaussian of standard deviation tau
    (scale-space smoothing).  The kernel is tabulated by distance; entries
    smaller than MagickEpsilon are left at zero.
  */
  double
    exponent,
    *kernel,
    norm,
    total;

  register ssize_t
    u,
    x;

  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  norm=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) exponent*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    total=0.0;
    for (u=0; u <= 255; u++)
      total+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=norm*total;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segment an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;
  MagickBooleanType
    status;
  register ssize_t
    i;
  short
    *extrema[MaxDimension];
  ssize_t
    *histogram[MaxDimension];
  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels allocated so far before reporting failure. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* ThrowBinaryException supplies the return statement (macro). */
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram: work in the requested colorspace, find the optimal
    scale-space tau per channel to locate histogram peaks/valleys (a zero
    smooth_threshold defaults to 1.0).
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  /* Restore the caller's colorspace regardless of classification result. */
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
%  directions as: 1 is negative to positive; 0 is no zero crossing; and -1
%  is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  /*
    Mark sign information of the (denoised) second derivative.  `state' is a
    flag set to 1 by a negative bin and to -1 by a positive bin; a bin is
    marked (-1 for negative, 1 for positive) when the flag already agrees
    with its sign.  Zero bins leave both the flag and the mark untouched.
  */
  ssize_t
    bin,
    state;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (bin=0; bin <= 255; bin++)
    if ((second_derivative[bin] < smooth_threshold) &&
        (second_derivative[bin] >= -smooth_threshold))
      second_derivative[bin]=0.0;
  /*
    Mark zero crossings.
  */
  state=0;
  for (bin=0; bin <= 255; bin++)
  {
    crossings[bin]=0;
    if (second_derivative[bin] > 0.0)
      {
        if (state < 0)
          crossings[bin]=1;
        state=(-1);
      }
    else
      if (second_derivative[bin] < 0.0)
        {
          if (state > 0)
            crossings[bin]=(-1);
          state=1;
        }
  }
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Store x - y in *result.  Returns 1 if the difference is negative,
 * otherwise 0.  Note: *y is normalized in place as a scratch value.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y's microseconds so the usec subtraction
   * cannot go negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /*
    Fix: default the problem size (32^3 interior plus halo, 10 time steps)
    so Nx..Nt are never read uninitialized (undefined behavior) when
    command-line arguments are omitted.
  */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays: A holds two time planes, coef the 7 stencil
   * coefficient fields.
   * NOTE(review): malloc results are unchecked; oversized inputs crash. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* initialize variables (deterministic seed; interior points only --
   * boundary planes at index 0 are never written by the stencil either). */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the tiled stencil TESTS times and keep the best wall-clock time. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code (auto-generated time-tiled loop nest; do not
       hand-edit the bounds). */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-15,16)),ceild(24*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(12*t1+Nx+21,64)),floord(24*t2+Nx+20,64)),floord(24*t3+Nx+20,64)),floord(24*t1-24*t2+Nz+Nx+19,64));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),64*t4+62),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(64*t4,t5+1);
                    ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);  /* fix: the top-level pointer array was never freed */
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);  /* fix: the top-level pointer array was never freed */
  free(tile_size);
  return 0;
}
|
core_single_cpu_lcg.c | /* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; -*- */
/*
* This code has been contributed by the DARPA HPCS program. Contact
* David Koester <dkoester@mitre.org> or Bob Lucas <rflucas@isi.edu>
* if you have questions.
*
* GUPS (Giga UPdates per Second) is a measurement that profiles the memory
* architecture of a system and is a measure of performance similar to MFLOPS.
* The HPCS HPCchallenge RandomAccess benchmark is intended to exercise the
* GUPS capability of a system, much like the LINPACK benchmark is intended to
* exercise the MFLOPS capability of a computer. In each case, we would
* expect these benchmarks to achieve close to the "peak" capability of the
* memory system. The extent of the similarities between RandomAccess and
* LINPACK are limited to both benchmarks attempting to calculate a peak system
* capability.
*
* GUPS is calculated by identifying the number of memory locations that can be
* randomly updated in one second, divided by 1 billion (1e9). The term "randomly"
* means that there is little relationship between one address to be updated and
* the next, except that they occur in the space of one half the total system
* memory. An update is a read-modify-write operation on a table of 64-bit words.
* An address is generated, the value at that address read from memory, modified
* by an integer operation (add, and, or, xor) with a literal value, and that
* new value is written back to memory.
*
* We are interested in knowing the GUPS performance of both entire systems and
* system subcomponents --- e.g., the GUPS rating of a distributed memory
* multiprocessor the GUPS rating of an SMP node, and the GUPS rating of a
* single processor. While there is typically a scaling of FLOPS with processor
* count, a similar phenomenon may not always occur for GUPS.
*
* For additional information on the GUPS metric, the HPCchallenge RandomAccess
* Benchmark,and the rules to run RandomAccess or modify it to optimize
* performance -- see http://icl.cs.utk.edu/hpcc/
*
*/
/*
* This file contains the computational core of the single cpu version
* of GUPS. The inner loop should easily be vectorized by compilers
* with such support.
*
* This core is used by both the single_cpu and star_single_cpu tests.
*/
#include <hpcc.h>
#include "RandomAccess.h"
/* Number of updates to table (suggested: 4x number of table entries) */
#define NUPDATE (4 * TableSize)
/* Perform NUPDATE random read-modify-write (XOR) updates on Table using a
 * 64-bit linear congruential generator, running 128 independent LCG streams
 * so the inner loop can be vectorized/parallelized.
 *
 * TableSize must be a power of two (the caller guarantees this); the top
 * logTableSize bits of each random value select the table index.
 *
 * NOTE(review): with _OPENMP the inner loop updates Table concurrently
 * without synchronization; colliding indices can lose updates. This is
 * deliberate in the HPCC RandomAccess benchmark (up to 1% errors are
 * tolerated by the verification step).
 */
static void
RandomAccessUpdate_LCG(u64Int TableSize, u64Int *Table) {
u64Int i;
u64Int ran[128]; /* Current random numbers */
int j, logTableSize;
/* Perform updates to main table. The scalar equivalent is:
*
* u64Int ran;
* ran = 1;
* for (i=0; i<NUPDATE; i++) {
* ran = LCG_MUL64 * ran + LCG_ADD64;
* table[ran >> (64 - logTableSize)] ^= ran;
* }
*/
/* Seed stream j at position (NUPDATE/128)*j of the scalar LCG sequence,
 * so together the 128 streams cover the same updates as the scalar code. */
for (j=0; j<128; j++)
ran[j] = HPCC_starts_LCG((NUPDATE/128) * j);
/* logTableSize = ceil(log2(TableSize)); exact since TableSize is 2^k. */
logTableSize = 0;
for (i = 1; i < TableSize; i <<= 1)
logTableSize += 1;
for (i=0; i<NUPDATE/128; i++) {
/* #pragma ivdep */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j=0; j<128; j++) {
ran[j] = LCG_MUL64 * ran[j] + LCG_ADD64;
/* high logTableSize bits of the random value index the table */
Table[ran[j] >> (64 - logTableSize)] ^= ran[j];
}
}
}
/* Driver for the single-CPU LCG RandomAccess (GUPS) benchmark.
 *
 * params  - HPCC run parameters (memory budget, output file name)
 * doIO    - nonzero to append results to params->outFname
 * GUPs    - out: measured billions of updates per second (-1.0 on zero time)
 * failure - out: 0 if at most 1% of table locations are wrong, else 1
 *
 * Returns 0 on success, 1 on file-open or allocation failure.
 */
int
HPCC_RandomAccess_LCG(HPCC_Params *params, int doIO, double *GUPs, int *failure) {
u64Int i;
u64Int temp;
double cputime; /* CPU time to update table */
double realtime; /* Real time to update table */
double totalMem;
u64Int *Table;
u64Int logTableSize, TableSize;
FILE *outFile = NULL;
if (doIO) {
outFile = fopen( params->outFname, "a" );
if (! outFile) {
outFile = stderr;
fprintf( outFile, "Cannot open output file.\n" );
return 1;
}
}
/* calculate local memory per node for the update table */
totalMem = params->HPLMaxProcMem;
totalMem /= sizeof(u64Int);
/* calculate the size of update array (must be a power of 2) */
/* largest power of two that fits in half the available words */
for (totalMem *= 0.5, logTableSize = 0, TableSize = 1;
totalMem >= 1.0;
totalMem *= 0.5, logTableSize++, TableSize <<= 1)
; /* EMPTY */
Table = HPCC_XMALLOC( u64Int, TableSize );
if (! Table) {
if (doIO) {
fprintf( outFile, "Failed to allocate memory for the update table (" FSTR64 ").\n", TableSize);
fclose( outFile );
}
return 1;
}
params->RandomAccess_LCG_N = (s64Int)TableSize;
/* Print parameters for run */
if (doIO) {
fprintf( outFile, "Main table size = 2^" FSTR64 " = " FSTR64 " words\n", logTableSize,TableSize);
fprintf( outFile, "Number of updates = " FSTR64 "\n", NUPDATE);
}
/* Initialize main table */
for (i=0; i<TableSize; i++) Table[i] = i;
/* Begin timing here */
cputime = -CPUSEC();
realtime = -RTSEC();
RandomAccessUpdate_LCG( TableSize, Table );
/* End timed section */
cputime += CPUSEC();
realtime += RTSEC();
/* make sure no division by zero */
*GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0);
*GUPs *= 1e-9*NUPDATE;
/* Print timing results */
if (doIO) {
fprintf( outFile, "CPU time used = %.6f seconds\n", cputime);
fprintf( outFile, "Real time used = %.6f seconds\n", realtime);
fprintf( outFile, "%.9f Billion(10^9) Updates per second [GUP/s]\n", *GUPs );
}
/* Verification of results (in serial or "safe" mode; optional) */
/* Re-apply the whole scalar update sequence: XOR is self-inverse, so a
 * correct table returns to Table[i] == i; mismatches count lost updates. */
temp = 0x1;
for (i=0; i<NUPDATE; i++) {
temp = LCG_MUL64 * temp + LCG_ADD64;
Table[temp >> (64 - (int)logTableSize)] ^= temp;
}
temp = 0;
for (i=0; i<TableSize; i++)
if (Table[i] != i)
temp++;
if (doIO) {
fprintf( outFile, "Found " FSTR64 " errors in " FSTR64 " locations (%s).\n",
temp, TableSize, (temp <= 0.01*TableSize) ? "passed" : "failed");
}
/* benchmark rules allow up to 1% incorrect locations */
if (temp <= 0.01*TableSize) *failure = 0;
else *failure = 1;
HPCC_free( Table );
if (doIO) {
fflush( outFile );
fclose( outFile );
}
return 0;
}
|
openmp_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/openmp_demo: example of user multithreading
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// This demo uses OpenMP, and should work if GraphBLAS is compiled to
// use either OpenMP or pthreads to synchronize multiple user threads.
// If OpenMP is not available, this program will work fine without it, in a
// single user thread, regardless of the thread mechanism used by GraphBLAS.
#include "GraphBLAS.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#if defined __INTEL_COMPILER
#pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 )
#elif defined __GNUC__
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
#endif
#define NTHREADS 8
#define NTRIALS 10
#define N 6
#define OK(method) \
{ \
GrB_Info info = method ; \
if (! (info == GrB_SUCCESS || info == GrB_NO_VALUE)) \
{ \
printf ("Failure (id: %d, info: %d): %s\n", \
id, info, GrB_error ( )) ; \
/* return to caller (do not use inside critical section) */ \
return (0) ; \
} \
}
//------------------------------------------------------------------------------
// worker
//------------------------------------------------------------------------------
/* Worker body run by each user thread.
 *
 * Ahandle - out: receives a newly created N-by-N GrB_FP64 matrix
 * id      - thread identifier, mixed into the matrix values and messages
 *
 * Creates the matrix, triggers an intentional out-of-bounds setElement error
 * (index 1000+id in an N-by-N matrix) to exercise per-thread error state,
 * then hammers the matrix with NTRIALS rounds of dense updates.
 * Returns 0; the OK macro also returns 0 early on unexpected GrB failures.
 */
int worker (GrB_Matrix *Ahandle, int id)
{
printf ("\n================= worker %d starts:\n", id) ;
fprintf (stderr, "worker %d\n", id) ;
OK (GrB_Matrix_new (Ahandle, GrB_FP64, N, N)) ;
GrB_Matrix A = *Ahandle ;
// worker generates an intentional error message
GrB_Matrix_setElement_INT32 (A, 42, 1000+id, 1000+id) ;
// print the intentional error generated when the worker started
#pragma omp critical
{
// critical section
printf ("\n----------------- worker %d intentional error:\n", id) ;
printf ("%s\n", GrB_error ( )) ;
}
// NTRIALS rounds of setting every entry of the N-by-N matrix
for (int hammer_hard = 0 ; hammer_hard < NTRIALS ; hammer_hard++)
{
for (int i = 0 ; i < N ; i++)
{
for (int j = 0 ; j < N ; j++)
{
// value encodes (row, column, thread) so output is distinguishable
double x = (i+1)*100000 + (j+1)*1000 + id ;
OK (GrB_Matrix_setElement_FP64 (A, x, i, j)) ;
}
}
// force completion
GrB_Index nvals ;
OK (GrB_Matrix_nvals (&nvals, A)) ;
}
// Printing is done in a critical section, just so it is not overly
// jumbled. Each matrix and error will print in a single body of text,
// but the order of the matrices and errors printed will be out of order
// because the critical section does not enforce the order that the
// threads enter.
GrB_Info info2 ;
#pragma omp critical
{
// critical section
printf ("\n----------------- worker %d is done:\n", id) ;
info2 = GxB_Matrix_fprint (A, "A", GxB_SHORT, stdout) ;
}
// check the fprint result outside the critical section (OK may return)
OK (info2) ;
// worker generates an intentional error message
GrB_Matrix_setElement_INT32 (A, 42, 1000+id, 1000+id) ;
// print the intentional error generated when the worker started
// It should be unchanged.
#pragma omp critical
{
// critical section
printf ("\n----------------- worker %d error should be same:\n", id) ;
printf ("%s\n", GrB_error ( )) ;
}
return (0) ;
}
//------------------------------------------------------------------------------
// openmp_demo main program
//------------------------------------------------------------------------------
/* Demo entry point: initializes GraphBLAS in non-blocking mode, reports the
 * user-thread synchronization model GraphBLAS was built with, launches
 * NTHREADS OpenMP workers (each building its own matrix), then prints and
 * frees all matrices from the master thread. */
int main (int argc, char **argv)
{
fprintf (stderr, "Demo: %s:\n", argv [0]) ;
printf ("Demo: %s:\n", argv [0]) ;
// initialize the mutex
// id is also used by the OK macro in error messages before workers start
int id = -1 ;
// start GraphBLAS
OK (GrB_init (GrB_NONBLOCKING)) ;
int nthreads ;
OK (GxB_get (GxB_NTHREADS, &nthreads)) ;
fprintf (stderr, "openmp demo, nthreads %d\n", nthreads) ;
// Determine which user-threading model is being used.
GxB_Thread_Model thread_safety ;
GxB_Global_Option_get (GxB_THREAD_SAFETY, &thread_safety) ;
printf ("GraphBLAS is using ") ;
switch (thread_safety)
{
case GxB_THREAD_POSIX :
printf ("a POSIX pthread mutex\n") ;
break ;
case GxB_THREAD_WINDOWS :
printf ("a Windows CriticalSection\n") ;
break ;
case GxB_THREAD_ANSI :
printf ("an ANSI C11 mtx_lock\n") ;
break ;
case GxB_THREAD_OPENMP :
printf ("an OpenMP critical section\n") ;
break ;
default : // GxB_THREAD_NONE
#ifdef _OPENMP
printf ("(nothing! This will fail!)\n") ;
#else
printf ("nothing (OK since user program is single-threaded)\n") ;
#endif
break ;
}
printf ("to synchronize user threads.\n") ;
#ifdef _OPENMP
printf ("User threads in this program are OpenMP threads.\n") ;
#else
printf ("This user program is single threaded.\n") ;
#endif
GrB_Matrix Aarray [NTHREADS] ;
// create the threads
// each worker creates and fills its own matrix Aarray[id]
#pragma omp parallel for num_threads(NTHREADS)
for (id = 0 ; id < NTHREADS ; id++)
{
worker (&Aarray [id], id) ;
}
// the master thread prints them again, and frees them
for (int id = 0 ; id < NTHREADS ; id++)
{
GrB_Matrix A = Aarray [id] ;
printf ("\n---- Master prints matrix %d\n", id) ;
OK (GxB_Matrix_fprint (A, "A", GxB_SHORT, stdout)) ;
GrB_Matrix_free (&A) ;
}
// print an error message
// intentional failure: NULL output handle
printf ("\n\n---- Master thread prints an error message:\n") ;
GrB_Matrix_new (NULL, GrB_FP64, 1, 1) ;
printf ("Error: %s\n", GrB_error ( )) ;
// finish GraphBLAS
GrB_finalize ( ) ;
// finish OpenMP
exit (0) ;
}
|
convolution_sgemm_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM over an im2col'd int8 input (pack8: 8 int8 lanes per pixel, moved as
// one int64) producing pack4 int32 output: top_blob(p) = kernel(p) * im2col.
// Strategy: repack columns into 2-wide tiles, widen int8 -> int16 with sign
// extension, multiply with mullo/mulhi, accumulate in int32, and reduce each
// 4-vector of partial sums with a 4x4 transpose.
static void im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
// repack so each 2-column tile is contiguous; odd trailing column gets its
// own channel at index size/2 + size%2 - 1
Mat tmp;
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = size >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
int64_t* tmpptr = tmp.channel(i / 2);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
// copy two pack8 pixels (16 bytes) in one shot
__m128i _v = _mm_loadu_si128((const __m128i*)img0);
_mm_storeu_si128((__m128i*)tmpptr, _v);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
// leftover odd column, if any
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
// GEMM: one output channel group (4 channels, pack4) per iteration
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
// main loop: two output columns per iteration
for (; i + 1 < size; i += 2)
{
const signed char* tmpptr = tmp.channel(i / 2);
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
// _sum0x: column i, _sum1x: column i+1; x = one of 4 output channels
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
int j = 0;
for (; j < nn; j++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
// sign-extend 16 int8 values (8 per column) to int16
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
// TODO use _mm_cvtepi8_epi16 on sse4.1
// 32 int8 weights = 4 output channels x 8 input lanes, sign-extended
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
// 16x16 -> 32-bit products via mullo/mulhi pairs
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
__m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
__m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
__m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
__m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
__m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
__m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
__m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
__m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
__m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);
// interleave lo/hi halves into int32 and accumulate
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
_sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
_sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
_sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
_sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
_sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
_sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
_sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
_sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
_sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));
tmpptr += 16;
kptr0 += 32;
}
// transpose 4x4
// bring per-channel partial sums into position for horizontal reduction
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
// sum the four transposed vectors -> one pack4 result per column
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
outptr0 += 8;
}
// tail: one output column at a time
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
const signed char* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn; j++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
__m128i _sl0 = _mm_mullo_epi16(_val, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
__m128i _sl1 = _mm_mullo_epi16(_val, _w1);
__m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
__m128i _sl2 = _mm_mullo_epi16(_val, _w2);
__m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
__m128i _sl3 = _mm_mullo_epi16(_val, _w3);
__m128i _sh3 = _mm_mulhi_epi16(_val, _w3);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
_sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
_sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
_sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));
tmpptr += 8;
kptr0 += 32;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
_mm_storeu_si128((__m128i*)outptr0, _sum0);
outptr0 += 4;
}
}
}
// Reorder the maxk-inch-outch int8 kernel into 8a-4b-maxk-inch/8a-outch/4b
// layout so the GEMM inner loop can stream 4 output channels x 8 input lanes
// as one contiguous 32-byte group per spatial position.
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(32 * maxk, inch / 8, outch / 4, (size_t)1u);

    for (int oc = 0; oc + 3 < outch; oc += 4)
    {
        signed char* dst = kernel_tm.channel(oc / 4);

        for (int ic = 0; ic + 7 < inch; ic += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                // interleave: 4 output channels, each contributing 8 input lanes
                for (int a = 0; a < 4; a++)
                {
                    for (int b = 0; b < 8; b++)
                    {
                        const signed char* src = kernel.channel(oc + a).row<const signed char>(ic + b);
                        *dst++ = src[k];
                    }
                }
            }
        }
    }
}
// im2col + packed int8 GEMM entry point. Pixels are pack8 int8 (8 bytes),
// so each element is moved as one int64.
static void convolution_im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // jump from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            // one im2col row per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            *ptr++ = *sptr;
                            sptr += stride_w;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
inputBug313.c | #include<stdio.h>
int main()
{
    /* Each thread of the default OpenMP team prints its own greeting;
       without OpenMP the pragma is ignored and one line is printed. */
    #pragma omp parallel
    {
        printf("Hello.\n");
    }

    return 0;
}
|
Gemm_MT_Loop3_MRxNRKernel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include<immintrin.h>
#define alpha( i,j ) A[ (j)*ldA + (i) ] // map alpha( i,j ) to array A
#define beta( i,j ) B[ (j)*ldB + (i) ] // map beta( i,j ) to array B
#define gamma( i,j ) C[ (j)*ldC + (i) ] // map gamma( i,j ) to array C
#define min( x, y ) ( ( x ) < ( y ) ? x : y )
void LoopFive( int, int, int, double *, int, double *, int, double *, int );
void LoopFour( int, int, int, double *, int, double *, int, double *, int );
void LoopThree( int, int, int, double *, int, double *, double *, int );
void LoopTwo( int, int, int, double *, double *, double *, int );
void LoopOne( int, int, int, double *, double *, double *, int );
void Gemm_MRxNRKernel_Packed( int, double *, double *, double *, int );
void PackBlockA_MCxKC( int, int, double *, int, double * );
void PackPanelB_KCxNC( int, int, double *, int, double * );
/* Blocked GEMM driver: C += A * B with column-major matrices and leading
   dimensions ldA/ldB/ldC. Validates that the problem and cache-block sizes
   are compatible with the MRxNR micro-kernel, then runs the five-loop
   blocked algorithm. Exits the process on invalid blocking parameters. */
void MyGemm( int m, int n, int k, double *A, int ldA,
double *B, int ldB, double *C, int ldC )
{
    if ( m % MR != 0 || MC % MR != 0 )
    {
        printf( "m and MC must be multiples of MR\n" );
        exit( 0 );
    }

    if ( n % NR != 0 || NC % NR != 0 )
    {
        printf( "n and NC must be multiples of NR\n" );
        exit( 0 );
    }

    LoopFive( m, n, k, A, ldA, B, ldB, C, ldC );
}
/* Fifth (outermost) loop: partition C and B into column panels of width NC. */
void LoopFive( int m, int n, int k, double *A, int ldA,
double *B, int ldB, double *C, int ldC )
{
    for ( int j = 0; j < n; j += NC )
    {
        int jb = min( NC, n - j );   /* the final panel may be narrower */
        LoopFour( m, jb, k, A, ldA, &beta( 0, j ), ldB, &gamma( 0, j ), ldC );
    }
}
/* Fourth loop: partition the k dimension into KC-deep panels and pack each
   panel of B into the aligned buffer Btilde before descending. */
void LoopFour( int m, int n, int k, double *A, int ldA, double *B, int ldB,
double *C, int ldC )
{
    double *Btilde = ( double * ) _mm_malloc( KC * NC * sizeof( double ), 64 );

    for ( int p = 0; p < k; p += KC )
    {
        int pb = min( KC, k - p );   /* the final panel may be shallower */
        PackPanelB_KCxNC( pb, n, &beta( p, 0 ), ldB, Btilde );
        LoopThree( m, n, pb, &alpha( 0, p ), ldA, Btilde, C, ldC );
    }

    _mm_free( Btilde );
}
/* Third loop (parallelized): partition the m dimension into MC-row blocks,
   pack each block of A into a per-thread workspace, and multiply it against
   the packed panel Btilde.

   Each thread owns an MC*KC slice of the shared Atilde buffer, indexed by
   omp_get_thread_num(), so packing never races between threads.

   The m range is split into an evenly divisible part (full MC blocks spread
   across all threads) and a remainder that is re-tiled in smaller chunks,
   each still a multiple of the micro-kernel row count MR.

   FIX: the source had been corrupted by an HTML-entity decode that turned
   every "&Atilde[" into "Ã[", which does not compile; the address-of
   expressions below restore the intended code. The two dead "#if 0"
   alternatives (one of which referenced the undefined OMP_NUM_THREADS)
   have been removed. */
void LoopThree( int m, int n, int k, double *A, int ldA, double *Btilde, double *C, int ldC )
{
    int max_threads = omp_get_max_threads();
    double *Atilde = ( double * ) _mm_malloc( MC * KC * max_threads * sizeof( double ), 64 );

    /* Distribute m/MC blocks among threads */
    int loadbalanced_part = (m / (MC * max_threads) ) * (MC * max_threads); // integer division /
    int remainder = m - loadbalanced_part;

    // Distribute remainder load equally among total threads but should be multiple of MR
    int remainder_per_thread = ( (remainder / max_threads ) / MR ) * MR;
    if (remainder_per_thread == 0 ) remainder_per_thread = MR;

    // Compute the loadbalanced part in parallel: every block is a full MC rows
    #pragma omp parallel for
    for (int i = 0; i < loadbalanced_part; i += MC) {
        int ib = MC;
        PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, &Atilde[ MC * KC * omp_get_thread_num() ] );
        LoopTwo( ib, n, k, &Atilde[ MC * KC * omp_get_thread_num() ], Btilde, &gamma( i,0 ), ldC );
    }

    // Compute the rest in parallel
    #pragma omp parallel for
    for (int i = loadbalanced_part; i < m; i += remainder_per_thread ) {
        int ib = min( m-i, remainder_per_thread ); // last loop may not involve full block
        PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, &Atilde[ MC * KC * omp_get_thread_num() ] );
        LoopTwo( ib, n, k, &Atilde[ MC * KC * omp_get_thread_num() ], Btilde, &gamma( i,0 ), ldC );
    }

    _mm_free( Atilde );
}
/* Second loop: sweep NR-wide micro-panels of the packed B across the packed
   block of A. Micro-panel j of Btilde starts at offset j*k. */
void LoopTwo( int m, int n, int k, double *Atilde, double *Btilde, double *C, int ldC )
{
    for ( int j = 0; j < n; j += NR )
    {
        int jb = min( NR, n - j );   /* the final micro-panel may be narrower */
        LoopOne( m, jb, k, Atilde, &Btilde[ j * k ], &gamma( 0, j ), ldC );
    }
}
/* First loop: drive the packed micro-kernel over MR-row slivers of the
   packed block of A. Sliver i of Atilde starts at offset i*k.

   FIX: restores "&Atilde[ i*k ]", which an HTML-entity decode had corrupted
   into the non-compiling "Ã[ i*k ]". */
void LoopOne( int m, int n, int k, double *Atilde, double *MicroPanelB, double *C, int ldC )
{
    for ( int i = 0; i < m; i += MR )
    {
        int ib = min( MR, m - i );   /* unused: MyGemm guarantees m % MR == 0 */
        Gemm_MRxNRKernel_Packed( k, &Atilde[ i * k ], MicroPanelB, &gamma( i, 0 ), ldC );
    }
}
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128
/* Self-test vectors: "0x" + 2 hex salt digits + hash, and the plaintext. */
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Format init: scale the keys-per-crypt limits for OpenMP and allocate the
 * per-candidate key and hash buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
if (omp_t > 1) {
/* give each thread OMP_SCALE batches of work per crypt_all call */
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
}
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
/* Release the per-run key and hash buffers allocated in init(). */
static void done(void)
{
MEM_FREE(saved_key);
MEM_FREE(crypt_out);
}
/* A valid hash is the "0x" prefix followed by exactly
 * CIPHERTEXT_LENGTH - PREFIX_LENGTH hex digits. Returns 1 if valid, else 0. */
static int valid(char *ciphertext, struct fmt_main *self)
{
if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH) != 0)
return 0;
return hexlenl(ciphertext + PREFIX_LENGTH) == CIPHERTEXT_LENGTH - PREFIX_LENGTH;
}
/* Decode the hash portion of the ciphertext into raw bytes.
 * Layout: "0x" + 2 hex salt digits + 2 constant digits + hash hex.
 * Returns a pointer to a static buffer (not thread-safe, standard for
 * JtR get_binary implementations). */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy; /* forces alignment of c */
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Extract the one-byte salt (FEAL-8 seed) from the first two hex digits
 * after the "0x" prefix. Returns a pointer to a static byte. */
static void *get_salt(char *ciphertext)
{
char *p = ciphertext + PREFIX_LENGTH;
static unsigned char salt;
salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
return (void*)&salt;
}
/* Install the current salt; it is a single byte (the hash seed). */
static void set_salt(void *salt)
{
saved_salt = *(unsigned char *)salt;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate plaintext for slot `index`. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Hash all `*pcount` stored candidates with the current salt into
 * crypt_out, optionally in parallel (each index writes its own slot, so
 * the loop is race-free). Returns the count processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
generate_hash((unsigned char*)saved_key[index], saved_salt,
(unsigned char*)crypt_out[index]);
}
return count;
}
/* Quick scan: does any computed hash match `binary`?
 * NOTE(review): only the first ARCH_SIZE bytes are compared here as a fast
 * filter; cmp_one() does the full BINARY_SIZE comparison — confirm
 * ARCH_SIZE <= BINARY_SIZE on all supported builds. */
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full comparison of one computed hash against the target binary. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification needed beyond cmp_one; always a match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Bucket-hash accessors: low bits of the first 32-bit word of each
 * computed hash, masked to the standard JtR hash-table sizes. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Format descriptor registered with John the Ripper: first the static
 * parameters, then the method table wired to the functions above. */
struct fmt_main fmt_sybaseprop = {
{
/* params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
SybasePROP_tests
}, {
/* methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
db_graph.h | #ifndef _DB_GRAPH_H_
#define _DB_GRAPH_H_
#include "../io/io.h"
namespace db {
// A candidate arc between a sink net (_onet) and a driver net (_inet),
// identified by one up-via on each side and weighted by a selection cost.
template <typename T>
class GraphArc {
private:
    const T* _onet = nullptr;  // sink-side net (arc tail)
    const T* _inet = nullptr;  // driver-side net (arc head)
    unsigned _onodeIdx = 0;    // index into _onet->upVias
    unsigned _inodeIdx = 0;    // index into _inet->upVias
    double _cost = 0;          // weight used for candidate selection

public:
    GraphArc(const T* onet, const T* inet, const unsigned onodeIdx, const unsigned inodeIdx, const double cost)
        : _onet(onet), _inet(inet), _onodeIdx(onodeIdx), _inodeIdx(inodeIdx), _cost(cost) {}
    const T* inet() const { return _inet; }
    double cost() const { return _cost; }
    // Emit one CSV feature row for this arc; returns the sink net's pin
    // count when both nets share a parent (positive label), else 0.
    unsigned write(
        ostream& os, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) const;
    // Fix: const-qualified so arcs can be compared through const
    // references, as standard comparator conventions (make_heap /
    // push_heap usage in writeSel) expect.  Behavior is unchanged.
    bool operator<(const GraphArc<T>& rhs) const { return _cost < rhs._cost; }
};
// Emit one CSV row of geometric features for this arc and return the
// sink net's pin count when both nets share the same parent (positive
// label), otherwise 0.
template <typename T>
unsigned GraphArc<T>::write(
    ostream& os, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) const {
    const NetRouteUpNode& onode = _onet->upVias[_onodeIdx];
    const NetRouteUpNode& inode = _inet->upVias[_inodeIdx];
    // Signed via-to-via offsets along (dx) and across (dy) direction `dir`.
    const int dx = onode[dir] - inode[dir];
    const int dy = onode[1 - dir] - inode[1 - dir];
    // Offsets normalized by routing pitch ...
    const double px = dx / static_cast<double>(pitch[dir]);
    const double py = dy / static_cast<double>(pitch[1 - dir]);
    // ... and by the die extent (relative coordinates).
    const double rx = dx / static_cast<double>(die[dir].range());
    const double ry = dy / static_cast<double>(die[1 - dir].range());
    const unsigned numOPins = _onet->numOPins();
    os << _onet->name() << ',' << _inet->name() << ',' << _onodeIdx << ',' << _inodeIdx << ',' << px << ',' << py << ','
       << abs(px) << ',' << abs(py) << ',' << rx << ',' << ry << ',' << abs(rx) << ',' << abs(ry) << ',' << numOPins
       << ',' << _inet->numOPins() << ',';
    // Up-pin indicator columns (1/0) for sink then driver.
    if (_onet->upPin()) {
        os << "1,";
    } else {
        os << "0,";
    }
    if (_inet->upPin()) {
        os << "1,";
    } else {
        os << "0,";
    }
    // Sink-net wirelength/via totals, then per-layer features scanning
    // down from DBModule::Metal - 2 (zero-padded once i goes negative).
    os << _onet->pitchLen() << ',' << _onet->numVias() << ',' << _onet->pitchLen(DBModule::Metal - 1) << ',';
    for (int i = static_cast<int>(DBModule::Metal) - 2; i != static_cast<int>(DBModule::Metal) - 5; --i) {
        if (i >= 0) {
            os << _onet->numVias(i) << ',' << _onet->pitchLen(i) << ',';
        } else {
            os << 0 << ',' << 0 << ',';
        }
    }
    // Same feature set for the driver net.
    os << _inet->pitchLen() << ',' << _inet->numVias() << ',' << _inet->pitchLen(DBModule::Metal - 1) << ',';
    for (int i = static_cast<int>(DBModule::Metal) - 2; i != static_cast<int>(DBModule::Metal) - 5; --i) {
        if (i >= 0) {
            os << _inet->numVias(i) << ',' << _inet->pitchLen(i) << ',';
        } else {
            os << 0 << ',' << 0 << ',';
        }
    }
    // Label column: 1 iff sink and driver belong to the same parent net.
    if (_onet->parent() == _inet->parent()) {
        os << "1\n";
        return numOPins;
    } else {
        os << "0\n";
        return 0;
    }
}
// Bipartite candidate graph between driver nets (inodes, shared across
// all instances) and sink nets (_onodes, per instance).  run() emits
// CSV training data and per-via image crops.
template <typename T>
class Graph {
private:
    // Sentinel cost: IEEE infinity when available, else the largest double.
    static constexpr double INFTY =
        numeric_limits<double>::has_infinity ? numeric_limits<double>::infinity() : numeric_limits<double>::max();
    unsigned _nONodes = 0;  // number of sink nets added via addNode()
    unsigned _nINodes = 0;  // number of driver nets added via addNode()
    unsigned _nOPins = 0;   // total pin count over all sink nets
    using CostType = long long;
    using UCostType = unsigned long long;
    vector<T*> _onodes;  // sink-side nodes

public:
    // Shared static state used by the parallel writeSel() pass; the
    // counters and selOfs are updated under selMtx.
    static vector<T*> inodes;
    static mutex selMtx;
    static unsigned totalNumONetsMissed;
    static unsigned nFlows;
    static unsigned nCoveredNets;
    static unsigned nCoveredPins;
    static ofstream selOfs;

private:
    // Classify a split net as driver (source) or sink and record it.
    void addNode(T* splitNet);
    // Select and write candidate arcs for one sink net (thread-safe).
    static void writeSel(
        const T* onet, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir);
    // Write image crops around each up-via of a net at several zooms.
    static void writeImg(
        const T* splitNet, const vector<T*>& splitNets, const Image& image, const string& path, const unsigned dir);

public:
    bool run(const vector<T*>& splitNets,
             const Image& image,
             const Rectangle& die,
             const Point& pitch,
             const unsigned dir,
             const string& file);
};
// Out-of-class definitions for Graph<T>'s shared static members
// (zero-initialized counters, the driver-node list, mutex and stream).
template <typename T>
vector<T*> Graph<T>::inodes;
template <typename T>
mutex Graph<T>::selMtx;
template <typename T>
unsigned Graph<T>::totalNumONetsMissed;
template <typename T>
unsigned Graph<T>::nFlows;
template <typename T>
unsigned Graph<T>::nCoveredNets;
template <typename T>
unsigned Graph<T>::nCoveredPins;
template <typename T>
ofstream Graph<T>::selOfs;
// Record a split net as a driver node (source) or a sink node (sink);
// nets that are neither are ignored.  Updates the node/pin counters.
template <typename T>
void Graph<T>::addNode(T* splitNet) {
    if (splitNet->isSource()) {
        inodes.push_back(splitNet);
        ++_nINodes;
        return;
    }
    if (!splitNet->isSink()) return;
    _onodes.push_back(splitNet);
    ++_nONodes;
    _nOPins += splitNet->numOPins();
}
// For one sink net (onet), scan every driver net and keep the
// DBModule::NumCands cheapest candidate arcs in a max-heap keyed on
// cost, then emit them as CSV rows.  Coverage statistics are updated
// under selMtx; commented-out early-return variants are kept as-is.
template <typename T>
void Graph<T>::writeSel(
    const T* onet, const string& design, const Rectangle& die, const Point& pitch, const unsigned dir) {
    bool isMissed = true;   // no arc generated toward the true driver at all
    bool isCovered = true;  // the true driver arc survived candidate pruning
    vector<GraphArc<T>> arcs;
    for (T* inet : inodes) {
        double icap = 0;  // nonzero once an arc to this driver was selected
        const bool isLoop = T::isLoop(inet, onet);
        if (!isLoop) {
            unsigned onodeIdx = 0;
            unsigned inodeIdx = 0;
            UCostType cost = ULLONG_MAX;
            bool select = false;
            const int dieW = die[dir].range();
            // Find the cheapest admissible via pair between onet and inet.
            for (unsigned i = 0; i != onet->upVias.size(); ++i) {
                const NetRouteUpNode& on = onet->upVias[i];
                const int ox = on[dir] - die[dir].lo();
                // Vias within 8% of either die edge bypass the direction test.
                const bool ob = ox < dieW * 0.08 || ox > dieW * 0.92;
                for (unsigned j = 0; j != inet->upVias.size(); ++j) {
                    const NetRouteUpNode& in = inet->upVias[j];
                    const int ix = in[dir] - die[dir].lo();
                    const bool ib = ix < dieW * 0.08 || ix > dieW * 0.92;
                    // Lexicographic cost: distance along `dir` in the high
                    // 32 bits, orthogonal distance in the low bits.
                    const UCostType c = (static_cast<UCostType>(abs(ix - ox)) << 32) + abs(in[1 - dir] - on[1 - dir]);
                    const bool d = ob || ib || Net::isDirection(in, on);
                    if (d && c < cost) {
                        cost = c;
                        select = true;
                        onodeIdx = i;
                        inodeIdx = j;
                    }
                }
            }
            if (select) {
                if (arcs.size() < DBModule::NumCands) {
                    arcs.emplace_back(onet, inet, onodeIdx, inodeIdx, cost);
                    // Heapify once the candidate list is full (max-heap).
                    if (arcs.size() == DBModule::NumCands) make_heap(arcs.begin(), arcs.end());
                } else if (cost < arcs.front().cost()) {
                    // Evict the current worst candidate for this cheaper arc.
                    pop_heap(arcs.begin(), arcs.end());
                    if (onet->parent() == arcs.back().inet()->parent()) {
                        // The evicted arc was the true connection.
                        isCovered = false;
                        // return;
                    }
                    arcs.pop_back();
                    arcs.emplace_back(onet, inet, onodeIdx, inodeIdx, cost);
                    push_heap(arcs.begin(), arcs.end());
                } else if (onet->parent() == inet->parent()) {
                    // The true connection did not make the candidate list.
                    isCovered = false;
                    // return;
                }
                icap = 1;
            }
        }
        if (onet->parent() == inet->parent()) {
            if (icap) {
                isMissed = false;
            } else {
                if (isLoop) printlog(LOG_WARN, "onet %s inet %s is loop", onet->name().c_str(), inet->name().c_str());
                isCovered = false;
                // lock_guard<mutex> lock(selMtx);
                // ++totalNumONetsMissed;
                // return;
            }
        }
    }
    // Serialize counter updates and the CSV output across worker threads.
    lock_guard<mutex> lock(selMtx);
    if (isMissed) {
        ++totalNumONetsMissed;
        // return;
    }
    if (isCovered) {
        ++nCoveredNets;
        nCoveredPins += onet->numOPins();
    }
    nFlows += arcs.size();
    for (const GraphArc<T>& arc : arcs) arc.write(selOfs, design, die, pitch, dir);
}
// Around each up-via of splitNet, crop the global image at three zoom
// levels (j = 1, 2, 4), resample it into a 99x99 window with the net's
// own routing overlaid, and encode the result as a PNG.
template <typename T>
void Graph<T>::writeImg(
    const T* splitNet, const vector<T*>& splitNets, const Image& image, const string& path, const unsigned dir) {
    for (unsigned i = 0; i != splitNet->upVias.size(); ++i) {
        const NetRouteUpNode& node = splitNet->upVias[i];
        int x = node.x();
        int y = node.y();
        for (unsigned j = 1; j <= 4; j *= 2) {
            // Center a 99-cell window (49.5 scaled steps each side) on the via.
            const double xOrigin = x - image.xStep() * j * 49.5;
            const double yOrigin = y - image.yStep() * j * 49.5;
            const double xStep = image.xStep() * j;
            const double yStep = image.yStep() * j;
            // One-past-the-last source cell overlapped by the window,
            // clamped to the source image bounds (+0.5 rounds to nearest).
            const int xCeil =
                min<int>(image.xNum(), ceil((xOrigin + xStep * 99 - image.xOrigin()) / image.xStep()) + 0.5);
            const int yCeil =
                min<int>(image.yNum(), ceil((yOrigin + yStep * 99 - image.yOrigin()) / image.yStep()) + 0.5);
            Image img(xOrigin, yOrigin, xStep, yStep, 99, 99);
            img.setRouting(splitNet, 4);
            // Copy every overlapping source cell into the crop, sampling
            // each cell at its center.
            for (unsigned yIdx = max<int>(0, (yOrigin - image.yOrigin()) / image.yStep());
                 static_cast<int>(yIdx) < yCeil;
                 ++yIdx) {
                for (unsigned xIdx = max<int>(0, (xOrigin - image.xOrigin()) / image.xStep());
                     static_cast<int>(xIdx) < xCeil;
                     ++xIdx) {
                    img.setData(image.yOrigin() + image.yStep() * (yIdx + 0.5),
                                image.xOrigin() + image.xStep() * (xIdx + 0.5),
                                image.data(yIdx, xIdx));
                }
            }
            img.encode(path + "/" + splitNet->name() + "_" + to_string(i) + "_" + to_string(j) + ".png", dir);
        }
    }
}
// Top-level driver: partition splitNets into graph nodes, emit the
// wsc/drv/snk CSV files, write the selected candidate arcs (sel.csv)
// in parallel, report coverage statistics, and dump per-via images.
// Returns true unconditionally.
template <typename T>
bool Graph<T>::run(const vector<T*>& splitNets,
                   const Image& image,
                   const Rectangle& die,
                   const Point& pitch,
                   const unsigned dir,
                   const string& file) {
    // Only nets that reach the upper layers (have up-vias) become nodes.
    for (T* splitNet : splitNets) {
        if (splitNet->upVias.empty()) continue;
        addNode(splitNet);
    }
    // Derive the output directory and base name from `file` ("dir/base.ext").
    size_t pos0 = file.find_last_of('/');
    string path = ".";
    string csv = file;
    if (pos0 != string::npos) {
        path = file.substr(0, pos0);
        csv = file.substr(pos0 + 1);
    }
    size_t pos1 = csv.find('.');
    const string& base = csv.substr(0, pos1);
    path = path + "/" + base;
    // Design name: base with its trailing "_suffix" stripped.
    size_t pos2 = base.find_last_of('_');
    const string& design = base.substr(0, pos2);
    ofstream wscOfs = io::IOModule::write(path + "/" + base + ".wsc.csv", true);
    ofstream drvOfs = io::IOModule::write(path + "/" + base + ".drv.csv", true);
    ofstream snkOfs = io::IOModule::write(path + "/" + base + ".snk.csv", true);
    // CSV headers for the vpin-summary, driver, and sink files.
    wscOfs << "Vpin ID,X coordinate,Y coordinate,WL down to L1,Pin Type,NumberLayer One Pins,Ave Cell Area Input,Ave "
              "Cell Area Output,Ave Pin X coordinate,Ave Pin Y "
              "coordinate,CongestionCrouting,CongestionPlacement,Matching Vpin ID\n";
    drvOfs << "DESIGN,PARENT,NAME,VIA_IDX,VIA_ABSOLUTE_X,VIA_ABSOLUTE_Y,VIA_RELATIVE_X,VIA_RELATIVE_Y,DIR_NN,DIR_NP,"
              "DIR_PN,DIR_PP,SINK_COUNT,UP_PIN,WL,VIAS,WL_M4";
    snkOfs << "DESIGN,PARENT,NAME,VIA_IDX,VIA_ABSOLUTE_X,VIA_ABSOLUTE_Y,VIA_RELATIVE_X,VIA_RELATIVE_Y,DIR_NN,DIR_NP,"
              "DIR_PN,DIR_PP,SINK_COUNT,UP_PIN,WL,VIAS,WL_M4";
    // Per-layer columns for metals 3..1, matching the write() feature order.
    for (unsigned i = 3; i; --i) {
        drvOfs << ",VIAS_V" << i << i + 1 << ",WL_M" << i;
        snkOfs << ",VIAS_V" << i << i + 1 << ",WL_M" << i;
    }
    drvOfs << endl;
    snkOfs << endl;
    // Emit one matched driver/sink pair per driver net (first sink with
    // the same parent wins, hence the break).
    for (unsigned inetIdx = 0; inetIdx != inodes.size(); ++inetIdx) {
        const T* inet = inodes[inetIdx];
        for (const T* onet : _onodes) {
            if (onet->parent() != inet->parent()) continue;
            inet->write(drvOfs, design, die, pitch, dir);
            onet->write(snkOfs, design, die, pitch, dir);
            const NetRouteUpNode& ivia = inet->upVias[0];
            const NetRouteUpNode& ovia = onet->upVias[0];
            wscOfs << 'S' << inetIdx * 2 << ',' << ivia[dir] << ',' << ivia[1 - dir] << ',' << inet->len() << ",O,"
                   << inet->numPins() << ',' << inet->oArea() << ',' << inet->iArea() << ',';
            const Point iMeanPin = inet->meanPin();
            wscOfs << iMeanPin[dir] << ',' << iMeanPin[1 - dir] << ",0,0,S" << inetIdx * 2 + 1 << endl;
            wscOfs << 'S' << inetIdx * 2 + 1 << ',' << ovia[dir] << ',' << ovia[1 - dir] << ',' << onet->len() << ",I,"
                   << onet->numPins() << ',' << onet->oArea() << ",0,";
            const Point oMeanPin = onet->meanPin();
            wscOfs << oMeanPin[dir] << ',' << oMeanPin[1 - dir] << ",0,0,S" << inetIdx * 2 << endl;
            break;
        }
    }
    wscOfs.close();
    drvOfs.close();
    snkOfs.close();
    // Candidate-arc file; written concurrently by writeSel() below.
    selOfs = io::IOModule::write(path + "/" + base + ".sel.csv", true);
    selOfs << "SNK_NAME,DRV_NAME,SNK_VIA_IDX,DRV_VIA_IDX,SIGNED_ABSOLUTE_DIST_X,SIGNED_ABSOLUTE_DIST_Y,UNSIGNED_"
              "ABSOLUTE_DIST_X,UNSIGNED_ABSOLUTE_DIST_Y,SIGNED_RELATIVE_DIST_X,SIGNED_RELATIVE_DIST_Y,UNSIGNED_"
              "RELATIVE_DIST_X,UNSIGNED_RELATIVE_DIST_Y,SNK_SINK_COUNT,DRV_SINK_COUNT,SNK_UP_PIN,DRV_UP_PIN,SNK_WL,SNK_"
              "VIAS,SNK_WL_M4,";
    for (unsigned i = 3; i; --i) selOfs << "SNK_VIAS_V" << i << i + 1 << ",SNK_WL_M" << i << ',';
    selOfs << "DRV_WL,DRV_VIAS,DRV_WL_M4,";
    for (unsigned i = 3; i; --i) selOfs << "DRV_VIAS_V" << i << i + 1 << ",DRV_WL_M" << i << ',';
    selOfs << "LABEL\n";
    // Reset shared counters before the parallel selection pass.
    totalNumONetsMissed = 0;
    nFlows = 0;
    nCoveredNets = 0;
    nCoveredPins = 0;
#pragma omp parallel for
    for (unsigned i = 0; i < _nONodes; ++i) {
        Graph<T>::writeSel(_onodes[i], design, die, pitch, dir);
    }
    selOfs.close();
    if (totalNumONetsMissed) {
        printlog(LOG_WARN,
                 "%u / %u = %f sink nets missed while adding arcs",
                 totalNumONetsMissed,
                 _nONodes,
                 totalNumONetsMissed / static_cast<double>(_nONodes));
    }
    printlog(LOG_INFO,
             "%u iters %u driver nets %u flows cover %u / %u = %f sink nets and %u / %u = %f sink pins",
             DBModule::NumCands,
             _nINodes,
             nFlows,
             nCoveredNets,
             _nONodes,
             nCoveredNets / static_cast<double>(_nONodes),
             nCoveredPins,
             _nOPins,
             nCoveredPins / static_cast<double>(_nOPins));
    // Image dumps for every sink and driver node, in parallel.
#pragma omp parallel for
    for (unsigned i = 0; i < _nONodes; ++i) {
        Graph<T>::writeImg(_onodes[i], splitNets, image, path, dir);
    }
#pragma omp parallel for
    for (unsigned i = 0; i < _nINodes; ++i) {
        Graph<T>::writeImg(inodes[i], splitNets, image, path, dir);
    }
    return true;
}
} // namespace db
#endif
|
lsh_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
/***********************************************************************
* Author: Vincent Rabaud
*************************************************************************/
#ifndef FLANN_LSH_INDEX_H_
#define FLANN_LSH_INDEX_H_
#include <algorithm>
#include <cassert>
#include <cstring>
#include <map>
#include <vector>
#include "flann/general.h"
#include "flann/algorithms/nn_index.h"
#include "flann/util/matrix.h"
#include "flann/util/result_set.h"
#include "flann/util/heap.h"
#include "flann/util/lsh_table.h"
#include "flann/util/allocator.h"
#include "flann/util/random.h"
#include "flann/util/saving.h"
namespace flann
{
/** Parameter set for building an LSH index; stored as key/value pairs
 * in the IndexParams map consumed by LshIndex's constructors. */
struct LshIndexParams : public IndexParams
{
    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
    {
        (* this)["algorithm"] = FLANN_INDEX_LSH;
        // The number of hash tables to use
        (*this)["table_number"] = table_number;
        // The length of the key in the hash tables
        (*this)["key_size"] = key_size;
        // Number of levels to use in multi-probe (0 for standard LSH)
        (*this)["multi_probe_level"] = multi_probe_level;
    }
};
/**
* Locality-sensitive hashing index
*
* Contains the tables and other information for indexing a set of points
* for nearest-neighbor matching.
*/
template<typename Distance>
class LshIndex : public NNIndex<Distance>
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;
    typedef NNIndex<Distance> BaseClass;

    /** Constructor
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
    }

    /** Constructor
     * @param input_data dataset with the input features
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);

        setDataset(input_data);
    }

    LshIndex(const LshIndex& other) : BaseClass(other),
        tables_(other.tables_),
        table_number_(other.table_number_),
        key_size_(other.key_size_),
        multi_probe_level_(other.multi_probe_level_),
        xor_masks_(other.xor_masks_)
    {
    }

    /** Copy-and-swap assignment. */
    LshIndex& operator=(LshIndex other)
    {
        this->swap(other);
        return *this;
    }

    virtual ~LshIndex()
    {
        freeIndex();
    }

    BaseClass* clone() const
    {
        return new LshIndex(*this);
    }

    using BaseClass::buildIndex;

    /** Add points to the index.  Rebuilds the whole index once the
     * dataset has grown past size_at_build_ * rebuild_threshold,
     * otherwise appends the new points to every existing hash table.
     */
    void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        assert(points.cols==veclen_);
        size_t old_size = size_;
        extendDataset(points);

        if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
            buildIndex();
        }
        else {
            for (unsigned int i = 0; i < table_number_; ++i) {
                lsh::LshTable<ElementType>& table = tables_[i];
                // Fix: the original inner loop reused `i`, shadowing the
                // table index.  Renamed for clarity; behavior unchanged.
                for (size_t point_index = old_size; point_index < size_; ++point_index) {
                    table.add(point_index, points_[point_index]);
                }
            }
        }
    }

    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_LSH;
    }

    /** Serialize/deserialize the index; on load, the index_params_ map
     * is re-synchronized with the restored members. */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        ar.setObject(this);

        ar & *static_cast<NNIndex<Distance>*>(this);

        ar & table_number_;
        ar & key_size_;
        ar & multi_probe_level_;

        ar & xor_masks_;
        ar & tables_;

        if (Archive::is_loading::value) {
            index_params_["algorithm"] = getType();
            index_params_["table_number"] = table_number_;
            index_params_["key_size"] = key_size_;
            index_params_["multi_probe_level"] = multi_probe_level_;
        }
    }

    void saveIndex(FILE* stream)
    {
        serialization::SaveArchive sa(stream);
        sa & *this;
    }

    void loadIndex(FILE* stream)
    {
        serialization::LoadArchive la(stream);
        la & *this;
    }

    /**
     * Computes the index memory usage
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return size_ * sizeof(int);
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  Matrix<size_t>& indices,
                  Matrix<DistanceType>& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        int count = 0;
        if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:

    /**
     * Builds the index
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t,ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i=0;i<points_.size();++i) {
            features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            table = lsh::LshTable<ElementType>(veclen_, key_size_);

            // Add the features to the table
            table.add(features);
        }
    }

    void freeIndex()
    {
        /* nothing to do here */
    }

private:
    /** Defines the comparator on score and index
     */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
                       std::vector<lsh::BucketKey>& xor_masks)
    {
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key
            lsh::BucketKey new_key = key | (lsh::BucketKey(1) << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * @param vec the feature to analyze
     * @param do_radius flag indicating if we check the radius too
     * @param radius the radius if it is a radius search
     * @param do_k flag indicating if we limit the number of nn
     * @param k_nn the number of nearest neighbors
     * @param checked_average used for debugging
     *
     * NOTE(review): this overload is not reachable from findNeighbors()
     * (which uses the ResultSet overload below) and appears to be dead
     * code.  Two latent defects were fixed so it compiles if revived:
     * ScoreIndexPair was constructed from the iterator instead of the
     * dereferenced feature index, and points_ was accessed via a
     * nonexistent ".point" member (the live overload uses points_[idx]
     * directly).  Confirm against the live overload before use.
     */
    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn,
                      float& checked_average)
    {
        // NOTE(review): function-local static state is not thread-safe and
        // is never cleared between calls -- verify before reviving.
        static std::vector<ScoreIndexPair> score_index_heap;

        if (do_k) {
            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);

                        if (hamming_distance < worst_score) {
                            // Insert the new element
                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                            std::push_heap(score_index_heap.begin(), score_index_heap.end());

                            if (score_index_heap.size() > (unsigned int)k_nn) {
                                // Remove the highest distance value as we have too many elements
                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
                                score_index_heap.pop_back();
                                // Keep track of the worst score
                                worst_score = score_index_heap.front().first;
                            }
                        }
                    }
                }
            }
        }
        else {
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Compute the Hamming distance
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);
                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                    }
                }
            }
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;

                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;

                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                    if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    void swap(LshIndex& other)
    {
        BaseClass::swap(other);
        std::swap(tables_, other.tables_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(table_number_, other.table_number_);
        std::swap(key_size_, other.key_size_);
        std::swap(multi_probe_level_, other.multi_probe_level_);
        std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;

    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;

    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};
}
#endif //FLANN_LSH_INDEX_H_
|
rayleigh_quotient_iteration_eigenvalue_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
// Collaborator: Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_RAYLEIGH_QUOTIENT_ITERATION_EIGENVALUE_SOLVER_H_INCLUDED )
#define KRATOS_RAYLEIGH_QUOTIENT_ITERATION_EIGENVALUE_SOLVER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <numeric>
#include <vector>
// External includes
// Project includes
#include "includes/define.h"
#include "linear_solvers/iterative_solver.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "linear_solvers/skyline_lu_factorization_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class RayleighQuotientIterationEigenvalueSolver
* @ingroup KratosCore
* @brief This is a eigen solver based on the Rayleigh quotient iteration algorithm
* @details Rayleigh quotient iteration is an iterative method, that is, it delivers a sequence of approximate solutions that converges to a true solution in the limit (this is true for all algorithms that compute eigenvalues: since eigenvalues can be irrational numbers, there can be no general method for computing them in a finite number of steps). Very rapid convergence is guaranteed and no more than a few iterations are needed in practice to obtain a reasonable approximation. The Rayleigh quotient iteration algorithm converges cubically for Hermitian or symmetric matrices, given an initial vector that is sufficiently close to an eigenvector of the matrix that is being analyzed.
* @see https://en.wikipedia.org/wiki/Rayleigh_quotient_iteration
* @author Pooyan Dadvand
*/
template<class TSparseSpaceType, class TDenseSpaceType, class TLinearSolverType,
         class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
         class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class RayleighQuotientIterationEigenvalueSolver : public IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of RayleighQuotientIterationEigenvalueSolver
    KRATOS_CLASS_POINTER_DEFINITION(RayleighQuotientIterationEigenvalueSolver);

    /// The base class definition
    typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;

    /// The sparse matrix definition
    typedef typename TSparseSpaceType::MatrixType SparseMatrixType;

    /// The vector definition ("sparse")
    typedef typename TSparseSpaceType::VectorType VectorType;

    /// The dense matrix definition
    typedef typename TDenseSpaceType::MatrixType DenseMatrixType;

    /// The "dense" vector definition
    typedef typename TDenseSpaceType::VectorType DenseVectorType;

    /// The size type definition
    typedef std::size_t SizeType;

    /// The index type definition
    typedef std::size_t IndexType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor. Members take their in-class defaults (see below),
    /// so no member is ever read in an indeterminate state.
    RayleighQuotientIterationEigenvalueSolver()
    {
    }

    /**
     * @brief The "manual" settings constructor
     * @param NewMaxTolerance The convergence tolerance considered
     * @param NewMaxIterationsNumber The maximum number of iterations considered
     * @param NewRequiredEigenvalueNumber The number of eigenvalues to compute
     * @param pLinearSolver The linear solver considered
     * @param ShiftingConvergence The convergence level at which spectral shifting starts
     */
    RayleighQuotientIterationEigenvalueSolver(
        double NewMaxTolerance,
        unsigned int NewMaxIterationsNumber,
        unsigned int NewRequiredEigenvalueNumber,
        typename TLinearSolverType::Pointer pLinearSolver,
        double ShiftingConvergence = 0.25
        ): BaseType(NewMaxTolerance, NewMaxIterationsNumber),
           mRequiredEigenvalueNumber(NewRequiredEigenvalueNumber),
           mpLinearSolver(pLinearSolver),
           mShiftingConvergence(ShiftingConvergence)
    {
    }

    /**
     * @brief The parameters constructor
     * @param ThisParameters The input parameters
     * @param pLinearSolver The linear solver considered
     */
    RayleighQuotientIterationEigenvalueSolver(
        Parameters ThisParameters,
        typename TLinearSolverType::Pointer pLinearSolver
        ): mpLinearSolver(pLinearSolver)
    {
        Parameters DefaultParameters = Parameters(R"(
        {
            "solver_type"           : "rayleigh_quotient_iteration_eigenvalue_solver",
            "max_iteration"         : 500,
            "tolerance"             : 1e-9,
            "required_eigen_number" : 1,
            "shifting_convergence"  : 0.25,
            "verbosity"             : 1,
            "linear_solver_settings": {}
        })" );

        ThisParameters.ValidateAndAssignDefaults(DefaultParameters);

        mRequiredEigenvalueNumber = ThisParameters["required_eigen_number"].GetInt();
        mShiftingConvergence = ThisParameters["shifting_convergence"].GetDouble();
        mEchoLevel = ThisParameters["verbosity"].GetInt();
        BaseType::SetTolerance( ThisParameters["tolerance"].GetDouble() );
        BaseType::SetMaxIterationsNumber( ThisParameters["max_iteration"].GetInt() );
    }

    /// Copy constructor. Copies every member (the echo level included).
    RayleighQuotientIterationEigenvalueSolver(
        const RayleighQuotientIterationEigenvalueSolver& Other) :
        BaseType(Other),
        mRequiredEigenvalueNumber(Other.mRequiredEigenvalueNumber),
        mEchoLevel(Other.mEchoLevel),
        mpLinearSolver(Other.mpLinearSolver),
        mShiftingConvergence(Other.mShiftingConvergence)
    {
    }

    /// Destructor.
    virtual ~RayleighQuotientIterationEigenvalueSolver() {}

    ///@}
    ///@name Operators
    ///@{

    /// Assignment operator. Previously only the base part was assigned, which
    /// silently dropped the derived members; all of them are copied now.
    RayleighQuotientIterationEigenvalueSolver& operator=(const RayleighQuotientIterationEigenvalueSolver& Other)
    {
        BaseType::operator=(Other);
        mRequiredEigenvalueNumber = Other.mRequiredEigenvalueNumber;
        mEchoLevel = Other.mEchoLevel;
        mpLinearSolver = Other.mpLinearSolver;
        mShiftingConvergence = Other.mShiftingConvergence;
        return *this;
    }

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief This method initializes the system
     * @details It fills rR with the diagonal of rM and normalizes it to unit 2-norm
     * @param rR The vector containing the normalized components of the diagonal
     * @param rM The "mass" matrix
     */
    static void Initialize(
        VectorType& rR,
        const SparseMatrixType& rM
        )
    {
        const SizeType size_m = rM.size1();

        // Resize in case of not same size
        if (rR.size() != size_m) {
            rR.resize(size_m);
        }

        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(size_m); ++i) {
            rR[i] = rM(i,i);
        }

        KRATOS_ERROR_IF(norm_2(rR) == 0.0) << "Invalid M matrix. The norm2 of its diagonal is Zero" << std::endl;
        rR /= norm_2(rR);
    }

    /**
     * @brief This method performs a Sturm Sequence Check
     * @details Counts the negative entries of D in the LDL^T factorization, i.e.
     * the number of eigenvalues below the current shift
     * @param ShiftedK The shifted K matrix (K - shift*M)
     * @return The number of eigenvalues smaller than the shift
     */
    SizeType SturmSequenceCheck(SparseMatrixType& ShiftedK)
    {
        // Define an object to store skyline matrix and factorization
        LUSkylineFactorization<TSparseSpaceType, TDenseSpaceType> myFactorization;

        // Copy myMatrix into skyline format
        myFactorization.copyFromCSRMatrix(ShiftedK);

        // Factorize it
        myFactorization.factorize();

        SizeType counter = 0; // Number of eigenvalues less than the shift
        for(SizeType i = 0 ; i < ShiftedK.size1() ; i++){
            if(myFactorization.entriesD[i] < 0.0) {
                counter++;
            }
        }

        return counter;
    }

    /**
     * @brief The Rayleigh quotient iteration method
     * @param K The stiffness matrix
     * @param M The mass matrix
     * @param Eigenvalues The vector containing the eigenvalues
     * @param Eigenvectors The matrix containing the eigenvectors
     */
    void Solve(
        SparseMatrixType& K,
        SparseMatrixType& M,
        DenseVectorType& Eigenvalues,
        DenseMatrixType& Eigenvectors
        ) override
    {
        SizeType size = K.size1();
        SizeType max_iteration = BaseType::GetMaxIterationsNumber();
        double tolerance = BaseType::GetTolerance();

        VectorType x = boost::numeric::ublas::zero_vector<double>(size);
        VectorType y = boost::numeric::ublas::zero_vector<double>(size);

        Initialize(y,M);

        if(Eigenvalues.size() < 1) {
            Eigenvalues.resize(1,0.0);
        }

        const double epsilon = 1.0e-9;

        // Starting with first step
        double beta = 0.0;
        double ro = 0.0;
        double shift_value = 0.0;
        double old_ro = 0.0;//Eigenvalues[0];

        KRATOS_INFO_IF("RayleighQuotientIterationEigenvalueSolver", mEchoLevel > 1) << "Iteration    beta \t ro \t\t convergence norm \t min \t\t max" << std::endl;

        SparseMatrixType shifted_k(K);
        // Define an object to store skyline matrix and factorization
        LUSkylineFactorization<TSparseSpaceType, TDenseSpaceType> my_factorization;

        // Copy myMatrix into skyline format
        my_factorization.copyFromCSRMatrix(shifted_k);

        // Factorize it
        my_factorization.factorize();

        double min_shift_value = 0.0;
        double max_shift_value = 0.0;

        SizeType smaller_eigenvalue_numbers = 0;

        for(SizeType i = 0 ; i < max_iteration ; ++i) {
            // Solve (K - shift*M) * x = y with the current factorization
            //K*x = y
            //mpLinearSolver->Solve(shifted_k,x,y);
            my_factorization.backForwardSolve(size, y, x);

            ro = inner_prod(y,x);

            //y = M*x
            TSparseSpaceType::Mult(M, x, y);

            beta = inner_prod(x, y);
            KRATOS_ERROR_IF(beta == 0.0) << "Zero beta norm!" << std::endl;

            // Rayleigh quotient relative to the shifted operator, plus the shift
            const double delta_ro = (ro / beta);
            ro = delta_ro + shift_value;
            KRATOS_ERROR_IF(ro == 0.0) << "Perpendicular eigenvector to M" << std::endl;

            double convergence_norm = std::abs((ro - old_ro) / ro);

            if(convergence_norm < mShiftingConvergence) { // Start shifting after certain convergence
                // If there are no smaller eigenvalues yet we need to extend the range
                if(smaller_eigenvalue_numbers == 0) {
                    max_shift_value = ro;
                }

                if((ro > max_shift_value)||(ro < min_shift_value)) {
                    shift_value = (max_shift_value + min_shift_value) / 2.0;
                } else {
                    shift_value = ro;
                }

                // NOTE(review): shifted_k is not reset to K before this MatrixAdd, so
                // shifts appear to accumulate on top of any previous one here (the
                // bisection loop below does reset it) — confirm this is intentional.
                SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(shifted_k, M, - shift_value);

                // Copy myMatrix into skyline format
                my_factorization.copyFromCSRMatrix(shifted_k);

                // Factorize it
                my_factorization.factorize();

                SizeType new_smaller_eigenvalue_numbers = SturmSequenceCheck(shifted_k);

                if(new_smaller_eigenvalue_numbers == 0) {
                    min_shift_value = shift_value;
                } else {
                    max_shift_value = shift_value;
                    smaller_eigenvalue_numbers = new_smaller_eigenvalue_numbers;
                }

                // Bisect the shift interval until only one eigenvalue remains in it
                unsigned int iteration_number = 0;
                unsigned int max_shift_number = 4;

                while((smaller_eigenvalue_numbers > 1) && (max_shift_value-min_shift_value > epsilon) && (iteration_number++ < max_shift_number)) {
                    shift_value = (max_shift_value + min_shift_value) / 2.0;
                    shifted_k = K;
                    SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(shifted_k, M, - shift_value);

                    // Copy myMatrix into skyline format
                    my_factorization.copyFromCSRMatrix(shifted_k);

                    // Factorize it
                    my_factorization.factorize();

                    new_smaller_eigenvalue_numbers = SturmSequenceCheck(shifted_k);

                    if(new_smaller_eigenvalue_numbers == 0) {
                        min_shift_value = shift_value;
                        KRATOS_INFO_IF("RayleighQuotientIterationEigenvalueSolver", mEchoLevel > 1) << "			Finding " << smaller_eigenvalue_numbers << " eigenvalues in [" << min_shift_value << "," << max_shift_value  << "]" << std::endl;
                    } else {
                        max_shift_value = shift_value;
                        smaller_eigenvalue_numbers = new_smaller_eigenvalue_numbers;
                        KRATOS_INFO_IF("RayleighQuotientIterationEigenvalueSolver", mEchoLevel > 1) << "			Finding " << smaller_eigenvalue_numbers << " eigenvalues in [" << min_shift_value << "," << max_shift_value  << "]" << std::endl;
                    }
                }
            }

            // Normalize y to unit M-norm (beta keeps the sign of the quadratic form)
            if(beta < 0.0) {
                beta = -std::sqrt(-beta);
            } else {
                beta = std::sqrt(beta);
            }

            TSparseSpaceType::InplaceMult(y, 1.0/beta);

            KRATOS_INFO_IF("RayleighQuotientIterationEigenvalueSolver", mEchoLevel > 1) << i << " \t " << beta << " \t " << ro << " \t " << convergence_norm  << " \t\t " <<  min_shift_value << " \t " << max_shift_value << std::endl;

            if(convergence_norm < tolerance) {
                break;
            }

            old_ro = ro;
        }

        KRATOS_INFO_IF("RayleighQuotientIterationEigenvalueSolver", mEchoLevel > 0) << "ro:\n" << ro << std::endl << std::endl << "y:\n" << y << std::endl;

        Eigenvalues[0] = ro;

        if((Eigenvectors.size1() < 1) || (Eigenvectors.size2() < size)) {
            Eigenvectors.resize(1,size);
        }

        for(SizeType i = 0 ; i < size ; i++) {
            Eigenvectors(0,i) = x[i] / beta;
        }
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "Rayleigh quotient iteration eigenvalue solver with " << BaseType::GetPreconditioner()->Info();
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        BaseType::PrintData(rOStream);
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    // In-class initializers guarantee defined values even for the default and
    // "manual" constructors (which previously left these indeterminate while
    // Solve() reads mEchoLevel unconditionally).
    unsigned int mRequiredEigenvalueNumber = 1; // Number of eigenvalues requested
    unsigned int mEchoLevel = 0;                // Verbosity level

    typename TLinearSolverType::Pointer mpLinearSolver; // The pointer to the linear solver considered
    double mShiftingConvergence = 0.25;                 // Convergence level at which shifting starts

    std::vector<DenseVectorType> mQVector; // The Q vector
    std::vector<DenseVectorType> mPVector; // The P vector
    std::vector<DenseVectorType> mRVector; // The R vector

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; // Class RayleighQuotientIterationEigenvalueSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/// Input stream function. No state is read for this solver; the stream is
/// returned unchanged (declared for interface symmetry with operator<<).
template<class TSparseSpaceType, class TDenseSpaceType,
         class TPreconditionerType,
         class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
                                  RayleighQuotientIterationEigenvalueSolver<TSparseSpaceType, TDenseSpaceType,
                                  TPreconditionerType, TReordererType>& rThis)
{
    return IStream;
}
/// output stream function
/// Output stream function: prints the solver info line followed by its data.
template<class TSparseSpaceType, class TDenseSpaceType,
         class TPreconditionerType,
         class TReordererType>
inline std::ostream& operator << (std::ostream& OStream,
                                  const RayleighQuotientIterationEigenvalueSolver<TSparseSpaceType, TDenseSpaceType,
                                  TPreconditionerType, TReordererType>& rThis)
{
    rThis.PrintInfo(OStream);
    OStream << std::endl;
    rThis.PrintData(OStream);

    return OStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_RAYLEIGH_QUOTIENT_ITERATION_EIGENVALUE_SOLVER_H_INCLUDED defined
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
%    o value: the operand value applied by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel scratch record used by the evaluate methods: one double
  accumulator for every possible pixel channel.
*/
typedef struct _PixelChannels
{
  double
    channel[CompositePixelChannel];
} PixelChannels;
/*
  Release a per-thread PixelChannels set: free each allocated row, then the
  row table itself.  Always returns NULL, so callers can reassign in place.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  register ssize_t
    n;

  size_t
    count;

  assert(pixels != (PixelChannels **) NULL);
  count=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  for (n=0; n < (ssize_t) count; n++)
    if (pixels[n] != (PixelChannels *) NULL)
      pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
  return((PixelChannels **) RelinquishMagickMemory(pixels));
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
register ssize_t
i;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(GetImageListLength(images),MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
/* Return the larger of two doubles. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders two PixelChannels records by the signed sum of
  their per-channel differences (effectively by total intensity).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1,
    *color_2;

  double
    sum;

  register ssize_t
    i;

  color_1=(const PixelChannels *) x;
  color_2=(const PixelChannels *) y;
  sum=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    sum+=color_1->channel[i]-(double) color_2->channel[i];
  if (sum < 0)
    return(-1);
  if (sum > 0)
    return(1);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply one evaluate operator to a single channel sample and return the
  (unclamped) double result.  Note that for the aggregate operators (Mean,
  Median, Sum, RootMeanSquare) this only accumulates; the caller performs the
  final divide / sort / sqrt step (see EvaluateImage/EvaluateImages).
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  register ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result. It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value+0.5 rounds the operand to the nearest integer before the bitwise op */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor is treated as 1.0 to avoid division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* shift implemented as repeated doubling to stay in double arithmetic */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulate only; the caller divides by the sample count */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulate only; the caller sorts and picks the middle sample */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* preserve the sign for negative samples (odd-power style behavior) */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate squares only; the caller divides and takes the sqrt */
      result=(double) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Clone a canvas for the image list: the source is the list member with the
  most channels, sized to the maximum width and height found in the list.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *canvas,
    *image;

  size_t
    columns,
    rows;

  canvas=images;
  columns=images->columns;
  rows=images->rows;
  for (image=images; image != (Image *) NULL; image=image->next)
  {
    if (image->number_channels > canvas->number_channels)
      canvas=image;
    columns=MagickMax(image->columns,columns);
    rows=MagickMax(image->rows,rows);
  }
  return(CloneImage(canvas,columns,rows,MagickTrue,exception));
}
/*
  EvaluateImages() applies the evaluate operator across an image list,
  combining corresponding pixels of every image into one result image.
  The median operator gets its own branch (per-pixel sort); every other
  operator accumulates per column and post-processes (mean/multiply/RMS).

  Fix: the per-row pointer array `p` was leaked when a cache-view fetch or
  QueueCacheViewAuthenticPixels failed (the early `continue` skipped the
  RelinquishMagickMemory at the bottom of the row loop) — it is now released
  on those failure paths in both branches.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view,
    **image_view;

  const Image
    *next;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    j,
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(image);
    }
  /*
    One virtual cache view per source image.
  */
  next=images;
  for (j=0; j < (ssize_t) number_images; j++)
  {
    image_view[j]=AcquireVirtualCacheView(next,exception);
    next=GetNextImageInList(next);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* release the row pointers before bailing out of this row */
            p=(const Quantum **) RelinquishMagickMemory(p);
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          /*
            Gather one sample per image, sort, and keep the median.
          */
          for (j=0; j < (ssize_t) number_images; j++)
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register ssize_t
          i,
          x;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* release the row pointers before bailing out of this row */
            p=(const Quantum **) RelinquishMagickMemory(p);
            status=MagickFalse;
            continue;
          }
        /*
          Accumulate per-column results across the image list; the first image
          seeds each accumulator via AddEvaluateOperator.
        */
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        /*
          Finalize the aggregate operators (divide / rescale / sqrt).
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;

                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (j=0; j < (ssize_t) number_images; j++)
    image_view[j]=DestroyCacheView(image_view[j]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImage() applies the given operator with `value` to every updatable
  channel of every pixel of a single image, in place, row-parallel under
  OpenMP.  Returns MagickTrue on success, MagickFalse if any row fails.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* skip channels that are undefined, copy-only, or not updatable */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /* mean of the pixel and the operand: the operator only summed them */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const size_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    value;

  register ssize_t
    n;

  /*
    Evaluate the requested channel function at the given pixel intensity.
    Missing parameters fall back to the documented defaults; the result is
    clamped to the valid quantum range on return.
  */
  (void) exception;
  value=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: coefficients listed highest to lowest order (e.g.
        c0*x^3+c1*x^2+c2*x+c3), evaluated by Horner's rule.
      */
      value=0.0;
      for (n=0; n < (ssize_t) number_parameters; n++)
        value=value*QuantumScale*pixel+parameters[n];
      value*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at the range limits for invalid results): width,
        center, range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=2.0/width*(QuantumScale*pixel-center);
      if (value <= -1.0)
        value=bias-range/2.0;
      else
        if (value >= 1.0)
          value=bias+range/2.0;
        else
          value=(double) (range/MagickPI*asin((double) value)+bias);
      value*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        bias,
        center,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      value=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      value=(double) (QuantumRange*(range/MagickPI*atan((double)
        value)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(value));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Apply ApplyFunction() to every update-enabled channel of every pixel,
    processing the image row by row (in parallel when OpenMP is enabled).
    Returns MagickTrue on success, MagickFalse on a pixel-cache failure or
    when the progress monitor requests cancellation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path; fall through to CPU on failure. */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
      exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row already failed: skip remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* Only touch channels whose traits request an update. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Report the average entropy of the selected channels by delegating to
    GetImageStatistics() and reading the composite-channel entry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  /*
    Round the floating-point channel range reported by GetImageRange() to
    integral extrema (round-half-away via the +/-0.5 adjustments).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Fetch kurtosis and skewness of the composite channel from the image
    statistics; fail only if the statistics buffer cannot be acquired.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Fetch the mean and standard deviation of the composite channel from the
    image statistics buffer and release it before returning.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
  register ssize_t
    i;

  size_t
    count;

  /*
    Count the channels flagged for update; report at least one so callers
    may safely divide by the result.
  */
  count=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) && ((traits & UpdatePixelTrait) != 0))
      count++;
  }
  return(count == 0 ? 1 : count);
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments  8

  CacheView
    *image_view;

  ChannelMoments
    *channel_moments;

  /*
    Raw/central moment accumulators, indexed by pixel channel; slot
    MaxPixelChannels holds the composite (all-channel) moment.
  */
  double
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];

  PointInfo
    centroid[MaxPixelChannels+1];

  ssize_t
    channel,
    y;

  /*
    Compute normalized central moments and the derived ellipse parameters
    and Hu invariant moments for each channel.  Returns a caller-owned
    buffer of MaxPixelChannels+1 entries, or NULL on allocation or pixel
    cache failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* Raw moments M00/M10/M01 accumulate per channel and composite. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid).
    */
    if (M00[channel] < MagickEpsilon)
      {
        /* Empty/near-zero channel: default centroid to the image center. */
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* Central moments about the per-channel centroid. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  /* Average the composite slot over the number of contributing channels. */
  M00[MaxPixelChannels]/=GetImageChannels(image);
  M01[MaxPixelChannels]/=GetImageChannels(image);
  M02[MaxPixelChannels]/=GetImageChannels(image);
  M03[MaxPixelChannels]/=GetImageChannels(image);
  M10[MaxPixelChannels]/=GetImageChannels(image);
  M11[MaxPixelChannels]/=GetImageChannels(image);
  M12[MaxPixelChannels]/=GetImageChannels(image);
  M20[MaxPixelChannels]/=GetImageChannels(image);
  M21[MaxPixelChannels]/=GetImageChannels(image);
  M22[MaxPixelChannels]/=GetImageChannels(image);
  M30[MaxPixelChannels]/=GetImageChannels(image);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    /*
      Quadrant correction of the ellipse angle based on the signs of M11
      and (M20-M02).
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
          channel_moments[channel].ellipse_angle+=0.0;
        else
          if ((M20[channel]-M02[channel]) < 0.0)
            channel_moments[channel].ellipse_angle+=90.0;
          else
            channel_moments[channel].ellipse_angle+=0.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=180.0;
        }
      else
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=0.0;
        }
    /*
      NOTE(review): the eccentricity uses axis.y/axis.x rather than the
      squared ratio sqrt(1-(b^2/a^2)) -- confirm this is the intended
      definition.
    */
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    A break in the moment scan (pixel fetch failure) leaves y short of
    image->rows; free the buffer and return NULL in that case.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  /*
    Compute the perceptual hash: for each requested colorspace (artifact
    "phash:colorspaces", default "sRGB,HCLp"), blur the image, transform it
    to that colorspace, and record -log10 of each Hu image moment.  The
    returned buffer (MaxPixelChannels+1 entries) is owned by the caller.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Fix: destroy the blurred image before bailing out; the original
          code leaked hash_image on colorspace-transform failure.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  /*
    Find the minimum and maximum sample values over all update-enabled
    channels.  Each (possibly parallel) row computes a private min/max,
    then merges it into the shared result inside a critical section.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Seed the row-local extrema from the first qualifying sample. */
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
    /* Merge the row extrema into the shared result under mutual exclusion. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
*histogram,
standard_deviation;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
register ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
channel_statistics[channel].entropy+=-count*MagickLog10(count)*
PerceptibleReciprocal(MagickLog10(number_bins));
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
GetPixelChannels(image);
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositePixelChannel].mean=0.0;
channel_statistics[CompositePixelChannel].standard_deviation=0.0;
channel_statistics[CompositePixelChannel].entropy=0.0;
for (i=0; i < (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[CompositePixelChannel].mean+=
channel_statistics[i].mean;
channel_statistics[CompositePixelChannel].standard_deviation+=
channel_statistics[i].standard_deviation;
channel_statistics[CompositePixelChannel].entropy+=
channel_statistics[i].entropy;
}
channel_statistics[CompositePixelChannel].mean/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].standard_deviation/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].entropy/=(double)
GetImageChannels(image);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* per-thread accumulator rows: one MaxPixelChannels-wide sum per column */
  PixelChannels
    **magick_restrict polynomial_pixels;

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* zero this thread's accumulator row before summing the term images */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /* only the first number_terms images contribute a term */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          /* NOTE(review): the channel map is taken from the output image
             while p indexes next's pixels — confirm the maps agree when
             the list images differ in channel layout */
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /* terms[] is (coefficient, degree) pairs: c_j * v^d_j */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /* scale the accumulated [0,1] sums back to quantum range */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  A PixelList holds the intensities of one width x height neighborhood in a
  skip-list keyed by 16-bit intensity, so order statistics (min, max,
  median, mode, ...) can be extracted by an ordered walk.
*/
typedef struct _SkipNode
{
  size_t
    next[9],    /* forward links, one per skip-list level (0..8) */
    count,      /* number of neighborhood pixels with this intensity */
    signature;  /* generation stamp; node is live when it matches the list */
} SkipNode;

typedef struct _SkipList
{
  ssize_t
    level;      /* highest level currently in use */

  SkipNode
    *nodes;     /* one node per intensity (0..65535) plus sentinel 65536 */
} SkipList;

typedef struct _PixelList
{
  size_t
    length,     /* neighborhood population: width*height */
    seed;       /* LCG state for randomized node levels */

  SkipList
    skip_list;

  size_t
    signature;  /* current generation; advanced by ResetPixelList() */
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release a pixel list: free the aligned skip-list node storage first,
    then the list structure itself.  NULL is tolerated and returned.
  */
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *)
          RelinquishAlignedMemory(pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  /*
    Destroy each per-thread pixel list, then the pointer array itself.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    n;

  size_t
    number_threads;

  /*
    Allocate one pixel list per worker thread.  On any failure, release
    everything acquired so far and return NULL.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_list[n]=AcquirePixelList(width,height);
    if (pixel_list[n] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
/*
  Insert intensity `color` into the skip-list with an initial count of 1.
  Caller guarantees the node is not already live in this generation
  (InsertPixelList checks the signature first).
*/
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: record, per level, the last
    node whose key precedes `color` (65536 is the sentinel/head).
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node: each LCG draw continues
    to a higher level with probability 1/4 (both bits of 0x300 set).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list at every level up to `level`.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    highest,
    value;

  ssize_t
    seen;

  /*
    Report the largest intensity in the neighborhood.  Level 0 of the
    skip-list links every populated intensity in ascending order; the walk
    tallies the population so it stops after all pixels are accounted for.
  */
  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  highest=list->nodes[value].next[0];
  do
  {
    value=list->nodes[value].next[0];
    if (value > highest)
      highest=value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) highest);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    total;

  register SkipList
    *list;

  size_t
    value;

  ssize_t
    seen;

  /*
    Average the neighborhood: each populated intensity on level 0 of the
    skip-list is weighted by the number of pixels carrying it.
  */
  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  total=0.0;
  do
  {
    value=list->nodes[value].next[0];
    total+=(double) list->nodes[value].count*value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    value;

  ssize_t
    seen;

  /*
    Walk level 0 in ascending intensity order until more than half the
    neighborhood population has been passed; that intensity is the median.
  */
  list=(&pixel_list->skip_list);
  value=65536L;
  seen=0;
  do
  {
    value=list->nodes[value].next[0];
    seen+=list->nodes[value].count;
  } while (seen <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) value);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    lowest,
    value;

  ssize_t
    seen;

  /*
    Report the smallest intensity in the neighborhood.  The ordered level-0
    walk also tallies the population so every pixel is accounted for.
  */
  list=(&pixel_list->skip_list);
  seen=0;
  value=65536UL;
  lowest=list->nodes[value].next[0];
  do
  {
    value=list->nodes[value].next[0];
    if (value < lowest)
      lowest=value;
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) lowest);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    best_count,
    predominant,
    value;

  ssize_t
    seen;

  /*
    Report the most frequently occurring intensity in the neighborhood;
    ties keep the smallest intensity encountered first.
  */
  list=(&pixel_list->skip_list);
  value=65536L;
  predominant=value;
  best_count=list->nodes[predominant].count;
  seen=0;
  do
  {
    value=list->nodes[value].next[0];
    if (list->nodes[value].count > best_count)
      {
        predominant=value;
        best_count=list->nodes[predominant].count;
      }
    seen+=list->nodes[value].count;
  } while (seen < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) predominant);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Finds the non peak value for each of the colors: walk to the median
    node while tracking its neighbors; if the median sits at either end of
    the ordered list (neighbor equals the 65536 sentinel on exactly one
    side), step one node toward the interior instead.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the root mean square value for each of the color: walk level 0 of
    the skip-list, weighting each squared intensity by its population.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    /*
      Promote to double before multiplying: color*color alone can reach
      ~2^32, so the former size_t product (count*color*color) could
      overflow on platforms with a 32-bit size_t.
    */
    sum+=(double) p->nodes[color].count*color*color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the standard-deviation value for each of the color: accumulate the
    population-weighted sum and sum of squares, then take
    sqrt(E[x^2]-E[x]^2).
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    /*
      Weight the squared intensity by its population directly rather than
      the former O(count) loop that added color*color once per pixel --
      same quantity, one multiply, and less float rounding accumulation.
    */
    sum_squared+=(double) p->nodes[color].count*color*color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  /*
    Record one pixel in the list: if its 16-bit intensity already has a
    live node in this generation, bump the count; otherwise link a fresh
    node into the skip-list.
  */
  index=ScaleQuantumToShort(pixel);
  signature=pixel_list->skip_list.nodes[index].signature;
  if (signature != pixel_list->signature)
    {
      AddNodePixelList(pixel_list,index);
      return;
    }
  pixel_list->skip_list.nodes[index].count++;
}
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register SkipList
    *p;

  register SkipNode
    *root;

  /*
    Start a new generation: point every level of the sentinel node back at
    itself and advance the signature so nodes from the previous
    neighborhood are treated as stale without clearing them.
  */
  p=(&pixel_list->skip_list);
  p->level=0;
  root=p->nodes+65536UL;
  for (level=0; level < 9; level++)
    root->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* one skip-list per worker thread */
  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  /* offset (in Quantum units) of the neighborhood's center pixel within
     the padded virtual-pixel region fetched per row below */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch the row plus a width x height apron of virtual pixels */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        Quantum
          pixel;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and write-masked pixels pass unchanged */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        /* load the width x height neighborhood into this thread's list */
        pixels=p;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            InsertPixelList(pixels[i],pixel_list[id]);
            pixels+=GetPixelChannels(image);
          }
          /* advance to the next padded row: columns + width pixels total */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            double
              maximum,
              minimum;

            /* gradient = |max - min| of the neighborhood */
            GetMinimumPixelList(pixel_list[id],&pixel);
            minimum=(double) pixel;
            GetMaximumPixelList(pixel_list[id],&pixel);
            maximum=(double) pixel;
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            GetMaximumPixelList(pixel_list[id],&pixel);
            break;
          }
          case MeanStatistic:
          {
            GetMeanPixelList(pixel_list[id],&pixel);
            break;
          }
          case MedianStatistic:
          default:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            GetMinimumPixelList(pixel_list[id],&pixel);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            GetRootMeanSquarePixelList(pixel_list[id],&pixel);
            break;
          }
          case StandardDeviationStatistic:
          {
            GetStandardDeviationPixelList(pixel_list[id],&pixel);
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
convolution_1x1_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static inline signed char float2int8(float v)
{
int int32 = round(v);
if (int32 > 127) return 127;
if (int32 < -128) return -128;
return (signed char)int32;
}
#if __aarch64__
// Repack the 1x1 int8 kernel into the tile layout the sgemm micro-kernel
// consumes: output channels in groups of 4, input channels in pairs, so a
// 4x2 tile is stored contiguously as k0[0],k0[1],k1[0],k1[1],...,k3[1].
// Leftover output channels (outch % 4) are packed one at a time.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;              // full groups of 4 output channels
    remain_outch_start = nn_outch << 2; // first ungrouped output channel

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        // source rows for the four output channels of this group
        const signed char* k0 = kernel + (p+0)*inch;
        const signed char* k1 = kernel + (p+1)*inch;
        const signed char* k2 = kernel + (p+2)*inch;
        const signed char* k3 = kernel + (p+3)*inch;

        signed char* ktmp = kernel_tm.channel(p/4);

        int q=0;
        // interleave two input channels across the four output channels
        for (; q+1<inch; q+=2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp[2] = k1[0];
            ktmp[3] = k1[1];
            ktmp[4] = k2[0];
            ktmp[5] = k2[1];
            ktmp[6] = k3[0];
            ktmp[7] = k3[1];
            ktmp += 8;

            k0 += 2;
            k1 += 2;
            k2 += 2;
            k3 += 2;
        }
        // odd trailing input channel: one weight per output channel
        for (; q<inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // remaining output channels, packed singly (paired input channels)
    for (int p=remain_outch_start; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch;

        signed char* ktmp = kernel_tm.channel(p/4 + p%4);

        int q=0;
        for (; q+1<inch; q=q+2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp += 2;
            k0 += 2;
        }
        for (; q<inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// bottom_tm memory packed 4 x 4
ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 4;
const signed char* img0 = bottom_blob.channel(0);
const signed char* img1 = bottom_blob.channel(1);
img0 += i;
img1 += i;
signed char* tmpptr = bottom_tm.channel(i/4);
int q = 0;
for (; q+1<inch; q=q+2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img0[1];
tmpptr[3] = img1[1];
tmpptr[4] = img0[2];
tmpptr[5] = img1[2];
tmpptr[6] = img0[3];
tmpptr[7] = img1[3];
tmpptr += 8;
img0 += bottom_blob.cstep;
img0 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
}
for (; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
int i = 0;
for (; i+3<size; i+=4)
{
signed char* tmpptr = bottom_tm.channel(i/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
"prfm pldl1keep, [%4, #128] \n"
"prfm pldl1keep, [%5, #128] \n"
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"ld1 {v0.16b}, [%4] \n"// i0, i1, i2, i3
"ld1 {v4.16b}, [%5] \n"// k0, k1, k2, k3
"add %4, %4, #16 \n"
"add %5, %5, #16 \n"
"rev32 v1.8h, v0.8h \n"// i1, i0, i3, i2
"rev64 v2.4s, v0.4s \n"// i2, i3, i0, i1
"rev64 v3.8h, v0.8h \n"// i3, i2, i1, i0
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"prfm pldl1keep, [%4, #1024] \n"
"prfm pldl1keep, [%5, #1024] \n"
"smlal2 v8.8h, v4.16b, v0.16b \n"
"smlal2 v9.8h, v4.16b, v1.16b \n"
"smlal2 v10.8h, v4.16b, v2.16b \n"
"smlal2 v11.8h, v4.16b, v3.16b \n"
"sadalp v16.4s, v8.8h \n"// i0k0, i1k1, i2k2, i3k3
"sadalp v17.4s, v9.8h \n"// i1k0, i0k1, i3k2, i2k3
"sadalp v18.4s, v10.8h \n"// i2k0, i3k1, i0k2, i1k3
"sadalp v19.4s, v11.8h \n"// i3k0, i2k1, i1k2, i0k3
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"// for (; k+1<L; k=k+2)
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = K & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"lsr w4, w4, #1 \n"// r4 = nn = L >> 1
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"// for (; k+1<L; k=k+2)
"ld1 {v0.8b}, [%4] \n"// i0, i1, i2, i3
"ld1 {v4.8b}, [%5] \n"// k0, k1, k2, k3
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"rev32 v1.4h, v0.4h \n"// i2, i3, i0, i1
"rev64 v2.2s, v0.2s \n"// i1, i0, i3, i2
"rev64 v3.4h, v0.4h \n"// i0, i1, i2, i3
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"sadalp v16.4s, v8.8h \n"
"sadalp v17.4s, v9.8h \n"
"sadalp v18.4s,v10.8h \n"
"sadalp v19.4s,v11.8h \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"// realloc
"mov v20.s[0], v16.s[0] \n"
"mov v20.s[1], v17.s[0] \n"
"mov v20.s[2], v18.s[0] \n"
"mov v20.s[3], v19.s[0] \n"
"mov v21.s[0], v17.s[1] \n"
"mov v21.s[1], v16.s[1] \n"
"mov v21.s[2], v19.s[1] \n"
"mov v21.s[3], v18.s[1] \n"
"mov v22.s[0], v18.s[2] \n"
"mov v22.s[1], v19.s[2] \n"
"mov v22.s[2], v16.s[2] \n"
"mov v22.s[3], v17.s[2] \n"
"mov v23.s[0], v19.s[3] \n"
"mov v23.s[1], v18.s[3] \n"
"mov v23.s[2], v17.s[3] \n"
"mov v23.s[3], v16.s[3] \n"
"and w4, %w12, #1 \n"// w4 = remain = K & 1;
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v0.8b}, [%4] \n"
"ld1 {v1.8b}, [%5] \n"
"add %4, %4, #4 \n"
"add %5, %5, #4 \n"
"sshll v0.8h, v0.8b, #0 \n"// i0[0], i1[0], i2[0], i3[0]
"sshll v1.8h, v1.8b, #0 \n"// k0[0], k1[0], k2[0], k3[0]
"smlal v20.4s, v0.4h, v1.h[0] \n"// i0k0, i1k0, i2k0, i3k0
"smlal v21.4s, v0.4h, v1.h[1] \n"// i0k1, i1k1, i2k1, i3k1
"smlal v22.4s, v0.4h, v1.h[2] \n"// i0k2, i1k2, i2k2, i3k2
"smlal v23.4s, v0.4h, v1.h[3] \n"// i0k3, i1k3, i2k3, i3k3
"subs w4, w4, #1 \n"
"bne 2b \n"
"5: \n"
"st1 {v20.4s}, [%0] \n"
"st1 {v21.4s}, [%1] \n"
"st1 {v22.4s}, [%2] \n"
"st1 {v23.4s}, [%3] \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_0 += tmpptr[1] * kptr[1];
sum0_1 += tmpptr[2] * kptr[0];
sum0_1 += tmpptr[3] * kptr[1];
sum0_2 += tmpptr[4] * kptr[0];
sum0_2 += tmpptr[5] * kptr[1];
sum0_3 += tmpptr[6] * kptr[0];
sum0_3 += tmpptr[7] * kptr[1];
sum1_0 += tmpptr[0] * kptr[2];
sum1_0 += tmpptr[1] * kptr[3];
sum1_1 += tmpptr[2] * kptr[2];
sum1_1 += tmpptr[3] * kptr[3];
sum1_2 += tmpptr[4] * kptr[2];
sum1_2 += tmpptr[5] * kptr[3];
sum1_3 += tmpptr[6] * kptr[2];
sum1_3 += tmpptr[7] * kptr[3];
sum2_0 += tmpptr[0] * kptr[4];
sum2_0 += tmpptr[1] * kptr[5];
sum2_1 += tmpptr[2] * kptr[4];
sum2_1 += tmpptr[3] * kptr[5];
sum2_2 += tmpptr[4] * kptr[4];
sum2_2 += tmpptr[5] * kptr[5];
sum2_3 += tmpptr[6] * kptr[4];
sum2_3 += tmpptr[7] * kptr[5];
sum3_0 += tmpptr[0] * kptr[6];
sum3_0 += tmpptr[1] * kptr[7];
sum3_1 += tmpptr[2] * kptr[6];
sum3_1 += tmpptr[3] * kptr[7];
sum3_2 += tmpptr[4] * kptr[6];
sum3_2 += tmpptr[5] * kptr[7];
sum3_3 += tmpptr[6] * kptr[6];
sum3_3 += tmpptr[7] * kptr[7];
tmpptr += 8;
kptr += 8;
}
for (; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
#endif
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
for (; i<size; i++)
{
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q=0;
for (; q+3<inch; q=q+4)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]
int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3]
int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]
tmpptr += 4;
kptr += 16;
}
for (; q+1<inch; q=q+2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1]
_r0[2] = _r0[0];
_r0[3] = _r0[1];
_r0[4] = _r0[0];
_r0[5] = _r0[1];
_r0[6] = _r0[0];
_r0[7] = _r0[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 2;
kptr += 8;
}
for (; q<inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k[0-3][0]
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vaddw_s16(_sum, vget_low_s16(_tp0));
tmpptr += 1;
kptr += 4;
}
vst1q_lane_s32(outptr0, _sum, 0);
vst1q_lane_s32(outptr1, _sum, 1);
vst1q_lane_s32(outptr2, _sum, 2);
vst1q_lane_s32(outptr3, _sum, 3);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[0] * kptr[2];
sum1 += tmpptr[1] * kptr[3];
sum2 += tmpptr[0] * kptr[4];
sum2 += tmpptr[1] * kptr[5];
sum3 += tmpptr[0] * kptr[6];
sum3 += tmpptr[1] * kptr[7];
tmpptr += 2;
kptr += 8;
}
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr += 1;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
#endif
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
int* outptr0 = out0;
int i = 0;
for (; i+3<size; i+=4)
{
signed char* tmpptr = bottom_tm.channel(i/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q=0;
for (; q+1<inch; q=q+2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
int8x8_t _k = vld1_s8(kptr); // k0[0-1]
_k[2] = _k[0];
_k[3] = _k[1];
_k[4] = _k[0];
_k[5] = _k[1];
_k[6] = _k[0];
_k[7] = _k[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 8;
kptr += 2;
}
for (; q<inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
int8x8_t _k = vld1_s8(kptr); // k[0][0]
int16x8_t _r0_s16 = vmovl_s8(_r0);
int16x8_t _k_s16 = vmovl_s8(_k);
_sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0
tmpptr += 4;
kptr += 1;
}
vst1q_s32(outptr0, _sum);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[2] * kptr[0];
sum1 += tmpptr[3] * kptr[1];
sum2 += tmpptr[4] * kptr[0];
sum2 += tmpptr[5] * kptr[1];
sum3 += tmpptr[6] * kptr[0];
sum3 += tmpptr[7] * kptr[1];
tmpptr += 8;
kptr += 2;
}
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
#endif
outptr0 += 4;
}
for (; i<size; i++)
{
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
// Quantized 1x1 stride-1 convolution as an int8 sgemm with fused requantization.
// The int32 accumulator is converted to float, multiplied by the per-output-channel
// dequantize scale (scale_in), biased, multiplied by the requantize scale (scale_out),
// then rounded-to-nearest (fcvtas/vcvtaq) and saturated down to int8.
//
// bottom_blob:     int8 input, w x h x inch
// top_blob:        int8 output, w x h x outch (pre-allocated by the caller)
// kernel:          weights pre-packed by the matching transform-kernel routine
//                  (layout assumed from the kptr access pattern — 4 outch interleaved)
// _bias:           optional per-output-channel float bias (empty => 0)
// scales_requant:  per output channel p: scales_requant[2*p] = scale_in,
//                  scales_requant[2*p+1] = scale_out
//
// FIX(review): in the AArch64 asm the remainder loop at label "4:" previously ended
// with "bne 2b", branching back into the k+1<L pair loop instead of itself. The bug
// is latent (K & 1 <= 1, so the branch is never taken) but the target is corrected
// to "4:" below.
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // bottom_tm memory packed 4 x 4: pixels are interleaved in groups of 4 so the
    // sgemm inner loops can load 4 spatial positions x 2 input channels at once.
    ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_blob.channel(0);
            const signed char* img1 = bottom_blob.channel(1);
            img0 += i;
            img1 += i;

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            // two input channels per iteration: store i0..i3 of ch q and ch q+1 interleaved
            for (; q+1<inch; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img0[1];
                tmpptr[3] = img1[1];
                tmpptr[4] = img0[2];
                tmpptr[5] = img1[2];
                tmpptr[6] = img0[3];
                tmpptr[7] = img1[3];

                tmpptr += 8;
                // advance both pointers by two channels
                img0 += bottom_blob.cstep;
                img0 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
            }
            for (; q<inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        // leftover pixels (size % 4): one pixel per channel row
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            for (int q=0; q<inch; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += bottom_blob.cstep;
            }
        }
    }

    // sgemm process
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        signed char* outptr0 = top_blob.channel(p);
        signed char* outptr1 = top_blob.channel(p+1);
        signed char* outptr2 = top_blob.channel(p+2);
        signed char* outptr3 = top_blob.channel(p+3);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        const float bias2 = bias ? bias[p+2] : 0.f;
        const float bias3 = bias ? bias[p+3] : 0.f;

        const float scale_requant_in0 = scales_requant[2*p];
        const float scale_requant_out0 = scales_requant[2*p+1];
        const float scale_requant_in1 = scales_requant[2*(p+1)];
        const float scale_requant_out1 = scales_requant[2*(p+1)+1];
        const float scale_requant_in2 = scales_requant[2*(p+2)];
        const float scale_requant_out2 = scales_requant[2*(p+2)+1];
        const float scale_requant_in3 = scales_requant[2*(p+3)];
        const float scale_requant_out3 = scales_requant[2*(p+3)+1];

        // per-lane bias/scale vectors for the 4 output channels; lanes are assigned
        // via the GCC/clang vector-subscript extension, all 4 lanes written below
        float32x4_t _bias03, _scale_in03, _scale_out03;
        float32x4_t _bias0 = vdupq_n_f32(bias0);
        float32x4_t _bias1 = vdupq_n_f32(bias1);
        float32x4_t _bias2 = vdupq_n_f32(bias2);
        float32x4_t _bias3 = vdupq_n_f32(bias3);
        _bias03[0] = bias0;
        _bias03[1] = bias1;
        _bias03[2] = bias2;
        _bias03[3] = bias3;
        _scale_in03[0] = scale_requant_in0;
        _scale_in03[1] = scale_requant_in1;
        _scale_in03[2] = scale_requant_in2;
        _scale_in03[3] = scale_requant_in3;
        _scale_out03[0] = scale_requant_out0;
        _scale_out03[1] = scale_requant_out1;
        _scale_out03[2] = scale_requant_out2;
        _scale_out03[3] = scale_requant_out3;

        int i = 0;
        for (; i+3<size; i+=4)
        {
            signed char* tmpptr = bottom_tm.channel(i/4);
            const signed char* kptr = kernel.channel(p/4);
#if 1 //__ARM_NEON
            asm volatile(
                "prfm pldl1keep, [%4, #128] \n"
                "prfm pldl1keep, [%5, #128] \n"
                "eor v16.16b, v16.16b, v16.16b \n" // sum0
                "eor v17.16b, v17.16b, v17.16b \n" // sum1
                "eor v18.16b, v18.16b, v18.16b \n" // sum2
                "eor v19.16b, v19.16b, v19.16b \n" // sum3
                "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
                "cmp w4, #0 \n"
                "beq 1f \n"
                "0: \n"// for (; k+3<L; k=k+4)
                "ld1 {v0.16b}, [%4] \n"// i0, i1, i2, i3
                "ld1 {v4.16b}, [%5] \n"// k0, k1, k2, k3
                "add %4, %4, #16 \n"
                "add %5, %5, #16 \n"
                "rev32 v1.8h, v0.8h \n"// i1, i0, i3, i2
                "rev64 v2.4s, v0.4s \n"// i2, i3, i0, i1
                "rev64 v3.8h, v0.8h \n"// i3, i2, i1, i0
                "smull v8.8h, v4.8b, v0.8b \n"
                "smull v9.8h, v4.8b, v1.8b \n"
                "smull v10.8h, v4.8b, v2.8b \n"
                "smull v11.8h, v4.8b, v3.8b \n"
                "prfm pldl1keep, [%4, #1024] \n"
                "prfm pldl1keep, [%5, #1024] \n"
                "smlal2 v8.8h, v4.16b, v0.16b \n"
                "smlal2 v9.8h, v4.16b, v1.16b \n"
                "smlal2 v10.8h, v4.16b, v2.16b \n"
                "smlal2 v11.8h, v4.16b, v3.16b \n"
                "sadalp v16.4s, v8.8h \n"// i0k0, i1k1, i2k2, i3k3
                "sadalp v17.4s, v9.8h \n"// i1k0, i0k1, i3k2, i2k3
                "sadalp v18.4s, v10.8h \n"// i2k0, i3k1, i0k2, i1k3
                "sadalp v19.4s, v11.8h \n"// i3k0, i2k1, i1k2, i0k3
                "subs w4, w4, #1 \n"
                "bne 0b \n"
                "1: \n"// for (; k+1<L; k=k+2)
                // remain loop
                "and w4, %w12, #3 \n"// w4 = remain = K & 3;
                "cmp w4, #0 \n"
                "beq 3f \n"
                "lsr w4, w4, #1 \n"// r4 = nn = L >> 1
                "cmp w4, #0 \n"
                "beq 3f \n"
                "2: \n"// for (; k+1<L; k=k+2)
                "ld1 {v0.8b}, [%4] \n"// i0, i1, i2, i3
                "ld1 {v4.8b}, [%5] \n"// k0, k1, k2, k3
                "add %4, %4, #8 \n"
                "add %5, %5, #8 \n"
                "rev32 v1.4h, v0.4h \n"// i1, i0, i3, i2
                "rev64 v2.2s, v0.2s \n"// i2, i3, i0, i1
                "rev64 v3.4h, v0.4h \n"// i3, i2, i1, i0
                "smull v8.8h, v4.8b, v0.8b \n"
                "smull v9.8h, v4.8b, v1.8b \n"
                "smull v10.8h, v4.8b, v2.8b \n"
                "smull v11.8h, v4.8b, v3.8b \n"
                "sadalp v16.4s, v8.8h \n"
                "sadalp v17.4s, v9.8h \n"
                "sadalp v18.4s,v10.8h \n"
                "sadalp v19.4s,v11.8h \n"
                "subs w4, w4, #1 \n"
                "bne 2b \n"
                "3: \n"// gather the rotated diagonal sums back into per-channel order
                "mov v20.s[0], v16.s[0] \n"
                "mov v20.s[1], v17.s[0] \n"
                "mov v20.s[2], v18.s[0] \n"
                "mov v20.s[3], v19.s[0] \n"
                "mov v21.s[0], v17.s[1] \n"
                "mov v21.s[1], v16.s[1] \n"
                "mov v21.s[2], v19.s[1] \n"
                "mov v21.s[3], v18.s[1] \n"
                "mov v22.s[0], v18.s[2] \n"
                "mov v22.s[1], v19.s[2] \n"
                "mov v22.s[2], v16.s[2] \n"
                "mov v22.s[3], v17.s[2] \n"
                "mov v23.s[0], v19.s[3] \n"
                "mov v23.s[1], v18.s[3] \n"
                "mov v23.s[2], v17.s[3] \n"
                "mov v23.s[3], v16.s[3] \n"
                "and w4, %w12, #1 \n"// w4 = remain = K & 1;
                "cmp w4, #0 \n"
                "beq 5f \n"
                "4: \n"
                "ld1 {v0.8b}, [%4] \n"
                "ld1 {v1.8b}, [%5] \n"
                "add %4, %4, #4 \n"
                "add %5, %5, #4 \n"
                "sshll v0.8h, v0.8b, #0 \n"// i0[0], i1[0], i2[0], i3[0]
                "sshll v1.8h, v1.8b, #0 \n"// k0[0], k1[0], k2[0], k3[0]
                "smlal v20.4s, v0.4h, v1.h[0] \n"// i0k0, i1k0, i2k0, i3k0
                "smlal v21.4s, v0.4h, v1.h[1] \n"// i0k1, i1k1, i2k1, i3k1
                "smlal v22.4s, v0.4h, v1.h[2] \n"// i0k2, i1k2, i2k2, i3k2
                "smlal v23.4s, v0.4h, v1.h[3] \n"// i0k3, i1k3, i2k3, i3k3
                "subs w4, w4, #1 \n"
                "bne 4b \n"// was "bne 2b" — wrong label, corrected (never taken since remain<=1)
                "5: \n"
                // top_s32 -> top_f32
                "scvtf v20.4s, v20.4s \n"
                "scvtf v21.4s, v21.4s \n"
                "scvtf v22.4s, v22.4s \n"
                "scvtf v23.4s, v23.4s \n"
                // top_f32 = top_f32 * scale_in
                "fmul v20.4s, v20.4s, %17.s[0] \n"
                "fmul v21.4s, v21.4s, %17.s[1] \n"
                "fmul v22.4s, v22.4s, %17.s[2] \n"
                "fmul v23.4s, v23.4s, %17.s[3] \n"
                // top_f32 = top_f32 + bias
                "fadd v20.4s, v20.4s, %13.4s \n"
                "fadd v21.4s, v21.4s, %14.4s \n"
                "fadd v22.4s, v22.4s, %15.4s \n"
                "fadd v23.4s, v23.4s, %16.4s \n"
                // top_f32 = top_f32 * scale_out
                "fmul v20.4s, v20.4s, %18.s[0] \n"
                "fmul v21.4s, v21.4s, %18.s[1] \n"
                "fmul v22.4s, v22.4s, %18.s[2] \n"
                "fmul v23.4s, v23.4s, %18.s[3] \n"
                // top_f32 -> top_s32 (round to nearest, ties away)
                "fcvtas v20.4s, v20.4s \n"
                "fcvtas v21.4s, v21.4s \n"
                "fcvtas v22.4s, v22.4s \n"
                "fcvtas v23.4s, v23.4s \n"
                // top_s32 -> top_s16
                "sqxtn v7.4h, v20.4s \n"
                "sqxtn2 v7.8h, v21.4s \n"
                "sqxtn v8.4h, v22.4s \n"
                "sqxtn2 v8.8h, v23.4s \n"
                // top_s16 -> top_s8
                "sqxtn v0.8b, v7.8h \n"
                "sqxtn v1.8b, v8.8h \n"
                // save top_s8
                "st1 {v0.s}[0], [%0] \n"
                "st1 {v0.s}[1], [%1] \n"
                "st1 {v1.s}[0], [%2] \n"
                "st1 {v1.s}[1], [%3] \n"
                : "=r"(outptr0), // %0
                "=r"(outptr1), // %1
                "=r"(outptr2), // %2
                "=r"(outptr3), // %3
                "=r"(tmpptr), // %4
                "=r"(kptr) // %5
                : "0"(outptr0),
                "1"(outptr1),
                "2"(outptr2),
                "3"(outptr3),
                "4"(tmpptr),
                "5"(kptr),
                "r"(inch), // %12
                "w"(_bias0), // %13
                "w"(_bias1), // %14
                "w"(_bias2), // %15
                "w"(_bias3), // %16
                "w"(_scale_in03), // %17
                "w"(_scale_out03) // %18
                : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
            );
#else
            // scalar reference: 4 output channels x 4 pixels, 2 input channels per step
            int sum0_0 = 0;
            int sum0_1 = 0;
            int sum0_2 = 0;
            int sum0_3 = 0;

            int sum1_0 = 0;
            int sum1_1 = 0;
            int sum1_2 = 0;
            int sum1_3 = 0;

            int sum2_0 = 0;
            int sum2_1 = 0;
            int sum2_2 = 0;
            int sum2_3 = 0;

            int sum3_0 = 0;
            int sum3_1 = 0;
            int sum3_2 = 0;
            int sum3_3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_0 += tmpptr[1] * kptr[1];
                sum0_1 += tmpptr[2] * kptr[0];
                sum0_1 += tmpptr[3] * kptr[1];
                sum0_2 += tmpptr[4] * kptr[0];
                sum0_2 += tmpptr[5] * kptr[1];
                sum0_3 += tmpptr[6] * kptr[0];
                sum0_3 += tmpptr[7] * kptr[1];

                sum1_0 += tmpptr[0] * kptr[2];
                sum1_0 += tmpptr[1] * kptr[3];
                sum1_1 += tmpptr[2] * kptr[2];
                sum1_1 += tmpptr[3] * kptr[3];
                sum1_2 += tmpptr[4] * kptr[2];
                sum1_2 += tmpptr[5] * kptr[3];
                sum1_3 += tmpptr[6] * kptr[2];
                sum1_3 += tmpptr[7] * kptr[3];

                sum2_0 += tmpptr[0] * kptr[4];
                sum2_0 += tmpptr[1] * kptr[5];
                sum2_1 += tmpptr[2] * kptr[4];
                sum2_1 += tmpptr[3] * kptr[5];
                sum2_2 += tmpptr[4] * kptr[4];
                sum2_2 += tmpptr[5] * kptr[5];
                sum2_3 += tmpptr[6] * kptr[4];
                sum2_3 += tmpptr[7] * kptr[5];

                sum3_0 += tmpptr[0] * kptr[6];
                sum3_0 += tmpptr[1] * kptr[7];
                sum3_1 += tmpptr[2] * kptr[6];
                sum3_1 += tmpptr[3] * kptr[7];
                sum3_2 += tmpptr[4] * kptr[6];
                sum3_2 += tmpptr[5] * kptr[7];
                sum3_3 += tmpptr[6] * kptr[6];
                sum3_3 += tmpptr[7] * kptr[7];

                tmpptr += 8;
                kptr += 8;
            }
            for (; q<inch; q++)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_1 += tmpptr[1] * kptr[0];
                sum0_2 += tmpptr[2] * kptr[0];
                sum0_3 += tmpptr[3] * kptr[0];

                sum1_0 += tmpptr[0] * kptr[1];
                sum1_1 += tmpptr[1] * kptr[1];
                sum1_2 += tmpptr[2] * kptr[1];
                sum1_3 += tmpptr[3] * kptr[1];

                sum2_0 += tmpptr[0] * kptr[2];
                sum2_1 += tmpptr[1] * kptr[2];
                sum2_2 += tmpptr[2] * kptr[2];
                sum2_3 += tmpptr[3] * kptr[2];

                sum3_0 += tmpptr[0] * kptr[3];
                sum3_1 += tmpptr[1] * kptr[3];
                sum3_2 += tmpptr[2] * kptr[3];
                sum3_3 += tmpptr[3] * kptr[3];

                tmpptr += 4;
                kptr += 4;
            }

            outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0);

            outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1);

            outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2);

            outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }

        // leftover pixels (size % 4): one pixel across the 4 output channels
        for (; i<size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            const signed char* kptr = kernel.channel(p/4);
#if 1 //__ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q=0;
            for (; q+3<inch; q=q+4)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]

                int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3]
                int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
                int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0]
                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]

                tmpptr += 4;
                kptr += 16;
            }
            for (; q+1<inch; q=q+2)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1]

                // broadcast the two input values across all four kernel pairs
                _r0[2] = _r0[0];
                _r0[3] = _r0[1];
                _r0[4] = _r0[0];
                _r0[5] = _r0[1];
                _r0[6] = _r0[0];
                _r0[7] = _r0[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 2;
                kptr += 8;
            }
            for (; q<inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr); // k[0-3][0]

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vaddw_s16(_sum, vget_low_s16(_tp0));

                tmpptr += 1;
                kptr += 4;
            }

            // top_s32 -> top_f32
            float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
            // top_f32 = top_f32 * scale_in
            _sum_f32 = vmulq_f32(_sum_f32, _scale_in03);
            // top_f32 = top_f32 + bias
            _sum_f32 = vaddq_f32(_sum_f32, _bias03);
            // top_f32 = top_f32 * scale_out
            _sum_f32 = vmulq_f32(_sum_f32, _scale_out03);
            // top_f32 -> top_s32
            _sum = vcvtaq_s32_f32(_sum_f32);
            // top_s32 -> top_s16
            int16x4_t _sum_s16 = vqmovn_s32(_sum);
            int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
            // top_s16 -> top_s8
            int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
            // save top_s8
            vst1_lane_s8(outptr0, _sum_s8, 0);
            vst1_lane_s8(outptr1, _sum_s8, 1);
            vst1_lane_s8(outptr2, _sum_s8, 2);
            vst1_lane_s8(outptr3, _sum_s8, 3);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];

                sum1 += tmpptr[0] * kptr[2];
                sum1 += tmpptr[1] * kptr[3];

                sum2 += tmpptr[0] * kptr[4];
                sum2 += tmpptr[1] * kptr[5];

                sum3 += tmpptr[0] * kptr[6];
                sum3 += tmpptr[1] * kptr[7];

                tmpptr += 2;
                kptr += 8;
            }
            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];

                tmpptr += 1;
                kptr += 4;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }

    // remaining output channels (outch % 4), one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        signed char* outptr0 = out0;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2*p];
        const float scale_requant_out = scales_requant[2*p+1];

        float32x4_t _bias0 = vdupq_n_f32(bias0);
        float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
        float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);

        int i = 0;
        for (; i+3<size; i+=4)
        {
            signed char* tmpptr = bottom_tm.channel(i/4);
            const signed char* kptr = kernel.channel(p/4 + p%4);
#if 1 //__ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
                int8x8_t _k = vld1_s8(kptr); // k0[0-1]

                // broadcast the two kernel values across all four pixels
                _k[2] = _k[0];
                _k[3] = _k[1];
                _k[4] = _k[0];
                _k[5] = _k[1];
                _k[6] = _k[0];
                _k[7] = _k[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 8;
                kptr += 2;
            }
            for (; q<inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
                int8x8_t _k = vld1_s8(kptr); // k[0][0]

                int16x8_t _r0_s16 = vmovl_s8(_r0);
                int16x8_t _k_s16 = vmovl_s8(_k);

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0

                tmpptr += 4;
                kptr += 1;
            }

            // top_s32 -> top_f32
            float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
            // top_f32 = top_f32 * scale_in
            _sum_f32 = vmulq_f32(_sum_f32, _scale_in);
            // top_f32 = top_f32 + bias
            _sum_f32 = vaddq_f32(_sum_f32, _bias0);
            // top_f32 = top_f32 * scale_out
            _sum_f32 = vmulq_f32(_sum_f32, _scale_out);
            // top_f32 -> top_s32
            _sum = vcvtaq_s32_f32(_sum_f32);
            // top_s32 -> top_s16
            int16x4_t _sum_s16 = vqmovn_s32(_sum);
            int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
            // top_s16 -> top_s8
            int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
            // save top_s8
            vst1_s8(outptr0, _sum_s8);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];

                sum1 += tmpptr[2] * kptr[0];
                sum1 += tmpptr[3] * kptr[1];

                sum2 += tmpptr[4] * kptr[0];
                sum2 += tmpptr[5] * kptr[1];

                sum3 += tmpptr[6] * kptr[0];
                sum3 += tmpptr[7] * kptr[1];

                tmpptr += 8;
                kptr += 2;
            }
            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];

                tmpptr += 4;
                kptr++;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
#endif
            outptr0 += 4;
        }
        for (; i<size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            const signed char* kptr = kernel.channel(p/4 + p%4);

            int q = 0;
            int sum0 = 0;

            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];

                tmpptr++;
                kptr++;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
            outptr0++;
        }
    }
}
#else
// Repack 1x1 int8 convolution weights for the sgemm kernel: output channels are
// grouped in fours and their weights interleaved along inch ([k0 k1 k2 k3] per
// input channel); leftover output channels are copied through linearly.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* weight_data = _kernel;

    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);

    int p = 0;
    for (; p+3<outch; p+=4)
    {
        signed char* ktmp = kernel_tm.channel(p/4);

        const signed char* k0 = weight_data + (p+0)*inch;
        const signed char* k1 = weight_data + (p+1)*inch;
        const signed char* k2 = weight_data + (p+2)*inch;
        const signed char* k3 = weight_data + (p+3)*inch;

        for (int q=0; q<inch; q++)
        {
            // interleave one weight from each of the 4 output channels
            ktmp[0] = k0[q];
            ktmp[1] = k1[q];
            ktmp[2] = k2[q];
            ktmp[3] = k3[q];
            ktmp += 4;
        }
    }
    for (; p<outch; p++)
    {
        signed char* ktmp = kernel_tm.channel(p/4 + p%4);

        const signed char* k0 = weight_data + p*inch;

        for (int q=0; q<inch; q++)
        {
            ktmp[q] = k0[q];
        }
    }
}
/*
* Convolution 1x1 quantized with sgemm int8
*/
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON__
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d5, d0[1] \n"
"vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d5, d0[2] \n"
"vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d5, d0[3] \n"
"vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d7, d1[0] \n"
"vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d7, d1[1] \n"
"vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d7, d1[2] \n"
"vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d7, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
"vst1.s32 {d16-d19}, [%1]! \n"
"vst1.s32 {d20-d23}, [%2]! \n"
"vst1.s32 {d24-d27}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a04,a10-a14
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
"vst1.s32 {d14-d15}, [%1]! \n"
"vst1.s32 {d16-d17}, [%2]! \n"
"vst1.s32 {d18-d19}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d20[0]}, [%0]! \n"
"vst1.s32 {d20[1]}, [%1]! \n"
"vst1.s32 {d21[0]}, [%2]! \n"
"vst1.s32 {d21[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
int* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
// // NOTE sgemm int8
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// int* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// int sum = 0;
//
// const signed char* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const signed char* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON__
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
signed char* outptr0 = top_blob.channel(p);
signed char* outptr1 = top_blob.channel(p+1);
signed char* outptr2 = top_blob.channel(p+2);
signed char* outptr3 = top_blob.channel(p+3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float scale_requant_in0 = scales_requant[2*p];
const float scale_requant_out0 = scales_requant[2*p+1];
const float scale_requant_in1 = scales_requant[2*(p+1)];
const float scale_requant_out1 = scales_requant[2*(p+1)+1];
const float scale_requant_in2 = scales_requant[2*(p+2)];
const float scale_requant_out2 = scales_requant[2*(p+2)+1];
const float scale_requant_in3 = scales_requant[2*(p+3)];
const float scale_requant_out3 = scales_requant[2*(p+3)+1];
float32x4_t _bias03, _scale_in03, _scale_out03;
_bias03[0] = bias0;
_bias03[1] = bias1;
_bias03[2] = bias2;
_bias03[3] = bias3;
_scale_in03[0] = scale_requant_in0;
_scale_in03[1] = scale_requant_in1;
_scale_in03[2] = scale_requant_in2;
_scale_in03[3] = scale_requant_in3;
_scale_out03[0] = scale_requant_out0;
_scale_out03[1] = scale_requant_out1;
_scale_out03[2] = scale_requant_out2;
_scale_out03[3] = scale_requant_out3;
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d31}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d31 \n"// a30-a37
"vmovl.s8 q4, d30 \n"// a20-a27
"vmovl.s8 q15, d29 \n"// a10-a17
"vmovl.s8 q14, d28 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d29, d0[0] \n"
"vmlal.s16 q8, d28, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d29, d0[1] \n"
"vmlal.s16 q10, d28, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d29, d0[2] \n"
"vmlal.s16 q12, d28, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d29, d0[3] \n"
"vmlal.s16 q6, d30, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d31, d1[0] \n"
"vmlal.s16 q8, d30, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d31, d1[1] \n"
"vmlal.s16 q10, d30, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d31, d1[2] \n"
"vmlal.s16 q12, d30, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d31, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[0] \n"
"vmul.f32 q8, q8, %e17[1] \n"
"vmul.f32 q9, q9, %e17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q14 \n"
"vadd.f32 q8, q8, q15 \n"
"vadd.f32 q9, q9, q15 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
// sum1
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %e18[1] \n"
"vmul.f32 q1, q9, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.8 {d16}, [%1]! \n"
// sum2
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
"vcvt.f32.s32 q11, q11 \n"
"vcvt.f32.s32 q12, q12 \n"
"vcvt.f32.s32 q13, q13 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %f17[0] \n"
"vmul.f32 q11, q11, %f17[0] \n"
"vmul.f32 q12, q12, %f17[1] \n"
"vmul.f32 q13, q13, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, q4 \n"
"vadd.f32 q11, q11, q4 \n"
"vadd.f32 q12, q12, q5 \n"
"vadd.f32 q13, q13, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %f18[0] \n"
"vmul.f32 q1, q11, %f18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d20, q0 \n"
"vqmovn.s32 d21, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d20, q10 \n"
// save top_s8
"vst1.8 {d20}, [%2]! \n"
// sum3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q12, %f18[1] \n"
"vmul.f32 q1, q13, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d24, q0 \n"
"vqmovn.s32 d25, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d24, q12 \n"
// save top_s8
"vst1.8 {d24}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ,"q14" ,"q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d29}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q15, d29 \n"// a20-a23,a30-a33
"vmovl.s8 q14, d28 \n"// a00-a04,a10-a14
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d28, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d28, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d28, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d29, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d29, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d29, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d29, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d30, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d30, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d30, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d30, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d31, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d31, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d31, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d31, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0-1
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[1] \n"
"vmul.f32 q8, q8, %f17[0] \n"
"vmul.f32 q9, q9, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.s32 {d12[0]}, [%0]! \n"
"vst1.s32 {d12[1]}, [%1]! \n"
// sum1-2
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %f18[0] \n"
"vmul.f32 q1, q9, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.s32 {d16[0]}, [%2]! \n"
"vst1.s32 {d16[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, %q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12[0]}, [%0]! \n"
"vst1.8 {d12[1]}, [%1]! \n"
"vst1.8 {d12[2]}, [%2]! \n"
"vst1.8 {d12[3]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"w"(_bias03), // %13
"w"(_scale_in03), // %14
"w"(_scale_out03) // %15
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
signed char* outptr0 = out0;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2*p];
const float scale_requant_out = scales_requant[2*p+1];
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
#if __ARM_NEON
const signed char* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
"vmul.f32 q7, q7, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
"vadd.f32 q7, q7, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
"vmul.f32 q1, q7, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
#if __ARM_NEON
const signed char* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
"vst1.s32 {d12[0]}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#if __ARM_NEON
const signed char* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
#endif
static void conv1x1s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    // A 1x1 kernel with unit stride degenerates to a plain matrix product,
    // so simply forward to the generic im2col + sgemm int8 path.
    const int kernel_extent = 1; // kernel size, both axes
    const int step = 1;          // stride, both axes
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_extent, kernel_extent, step, step, opt);
}
static void conv1x1s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    // 1x1 convolution with stride 2: same GEMM formulation as the stride-1
    // case, the stride only changes which input columns im2col gathers.
    const int kernel_extent = 1; // kernel size, both axes
    const int step = 2;          // stride, both axes
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_extent, kernel_extent, step, step, opt);
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` operands.
 *
 * NOTE: as in the classic GNU libc example this routine normalizes *y in
 * place so that the microsecond subtraction cannot underflow/overflow;
 * callers must not rely on *y being preserved.
 *
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field when x has fewer
     * microseconds, so the usec difference below is non-negative. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Conversely, carry excess microseconds into y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the grid and coefficient arrays, initializes them with
 * pseudo-random data, runs the tiled 25-point stencil TESTS times and
 * reports the best wall-clock time. */
int main(int argc, char *argv[])
{
    int i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* Bug fix: the original read Nx/Ny/Nz/Nt uninitialized (undefined
     * behavior) when fewer than four arguments were supplied. All four
     * are now required explicitly. */
    if (argc <= 4) {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return EXIT_FAILURE;
    }
    Nx = atoi(argv[1]) + 8; /* +8 = 4 halo (ghost) cells on each side */
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);

    /* Allocate the two time levels of the grid. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* 13 axis-symmetric variable-coefficient arrays (center + 4 per axis). */
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* Tile size information, including extra element to decide the list
     * length (terminated by -1). Modified here before source-to-source
     * transformations. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 4;
    tile_size[3] = 128;
    tile_size[4] = -1;

    /* Timekeeping. */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* Initialize variables.
     * Bug fix: the original initialization loops started at index 1 and only
     * filled A[0], yet the stencil reads halo cells at indices 0..3 and the
     * A[1] halo on odd time steps — all uninitialized. Fill both time levels
     * over the full index range instead. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        /* Tiled time-skewed sweep - Addition: 6 && Multiplication: 2 */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lbp, ubp;
        register int lbv, ubv;
        /* Start of CLooG code (auto-generated; do not hand-tune the bounds) */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=Nt-1;t1++) {
                lbp=ceild(t1+1,2);
                ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) {
                        for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(4*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(4*t3+Nx-9,128));t4++) {
                            for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),32*t4+30);t5++) {
                                for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                                        lbv=max(128*t4,4*t5+4);
                                        ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays. */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    /* Bug fix: tile_size was leaked in the original. */
    free(tile_size);
    return 0;
}
|
explicit_builder.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Ruben Zorrilla
//
//
#if !defined(KRATOS_EXPLICIT_BUILDER)
#define KRATOS_EXPLICIT_BUILDER
// System includes
#include <set>
#include <unordered_set>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/parallel_utilities.h"
#include "utilities/constraint_utilities.h"
#include "includes/kratos_parameters.h"
#include "utilities/atomic_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ExplicitBuilder
* @ingroup KratosCore
* @brief Current class provides an implementation for the base explicit builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* @author Ruben Zorrilla
*/
template<class TSparseSpace, class TDenseSpace >
class ExplicitBuilder
{
public:
///@name Type Definitions
///@{
/// Definition of the size type
typedef std::size_t SizeType;
/// Definition of the index type
typedef std::size_t IndexType;
/// Definition of the data type
typedef typename TSparseSpace::DataType TDataType;
///Definition of the sparse matrix
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
/// Definition of the vector size
typedef typename TSparseSpace::VectorType TSystemVectorType;
/// Definition of the pointer to the sparse matrix
typedef typename TSparseSpace::MatrixPointerType TSystemMatrixPointerType;
/// Definition of the pointer to the vector
typedef typename TSparseSpace::VectorPointerType TSystemVectorPointerType;
/// The local matrix definition
typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
/// The local vector definition
typedef typename TDenseSpace::VectorType LocalSystemVectorType;
/// Definition of the DoF class
typedef ModelPart::DofType DofType;
/// Definition of the DoF array type
typedef ModelPart::DofsArrayType DofsArrayType;
/// Definition of the DoF vector type
typedef ModelPart::DofsVectorType DofsVectorType;
/// The definition of the DoF objects
typedef typename DofsArrayType::iterator DofIteratorType;
typedef typename DofsArrayType::const_iterator DofConstantIteratorType;
/// The definition of the DoF set type
typedef typename std::unordered_set<DofType::Pointer, DofPointerHasher> DofSetType;
/// The containers of the entities
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// The definition of the element container type
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
/// The definition of the current class
typedef ExplicitBuilder<TSparseSpace, TDenseSpace> ClassType;
/// Pointer definition of ExplicitBuilder
KRATOS_CLASS_POINTER_DEFINITION(ExplicitBuilder);
///@}
///@name Life Cycle
///@{
/**
 * @brief Construct a new Explicit Builder object from settings.
 * @param ThisParameters The configuration parameters (validated against GetDefaultParameters())
 */
explicit ExplicitBuilder(Parameters ThisParameters)
{
    // Validate the user-provided settings against the defaults, then apply them
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);
}
/**
 * @brief Construct a new Explicit Builder object.
 * Default empty constructor (member initializers provide the defaults).
 */
explicit ExplicitBuilder() = default;
/**
 * @brief Destroy the Explicit Builder object.
 * Default (virtual) destructor so derived builders clean up correctly.
 */
virtual ~ExplicitBuilder() = default;
/**
 * @brief Factory method: creates a new instance of this builder type.
 * @param ThisParameters The configuration parameters
 * @return Shared pointer to the newly created builder
 */
virtual typename ClassType::Pointer Create(Parameters ThisParameters) const
{
    return Kratos::make_shared<ClassType>(ThisParameters);
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief This method returns the flag mCalculateReactionsFlag
 * @return The flag that tells if the reactions are computed
 */
bool GetCalculateReactionsFlag() const
{
    return mCalculateReactionsFlag;
}
/**
 * @brief This method sets the flag mCalculateReactionsFlag
 * @param CalculateReactionsFlag The flag that tells if the reactions are computed
 */
void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
{
    mCalculateReactionsFlag = CalculateReactionsFlag;
}
/**
 * @brief This method returns the flag mDofSetIsInitialized
 * @return The flag that tells if the dof set is initialized
 */
bool GetDofSetIsInitializedFlag() const
{
    return mDofSetIsInitialized;
}
/**
 * @brief This method sets the flag mDofSetIsInitialized
 * @param DofSetIsInitialized The flag that tells if the dof set is initialized
 */
void SetDofSetIsInitializedFlag(bool DofSetIsInitialized)
{
    mDofSetIsInitialized = DofSetIsInitialized;
}
/**
 * @brief This method returns the flag mResetDofSetFlag
 * @return The flag that tells if we need to reset the DOF set at each time step
 */
bool GetResetDofSetFlag() const
{
    return mResetDofSetFlag;
}
/**
 * @brief This method sets the flag mResetDofSetFlag
 * @param ResetDofSetFlag The flag that tells if we need to reset the DOF set at each time step
 */
void SetResetDofSetFlag(bool ResetDofSetFlag)
{
    mResetDofSetFlag = ResetDofSetFlag;
}
/**
 * @brief This method returns the flag mResetLumpedMassVectorFlag
 * @return The flag that tells if we need to reset the lumped mass vector at each time step
 */
bool GetResetLumpedMassVectorFlag() const
{
    return mResetLumpedMassVectorFlag;
}
/**
 * @brief This method sets the flag mResetLumpedMassVectorFlag
 * @param ResetLumpedMassVectorFlag The flag that tells if we need to reset the lumped mass vector at each time step
 */
void SetResetLumpedMassVectorFlag(bool ResetLumpedMassVectorFlag)
{
    mResetLumpedMassVectorFlag = ResetLumpedMassVectorFlag;
}
/**
 * @brief This method returns the value mEquationSystemSize
 * @return Size of the system of equations (number of DOFs)
 */
unsigned int GetEquationSystemSize() const
{
    return mEquationSystemSize;
}
/**
 * @brief It allows to get the list of Dofs from the element
 * @return Mutable reference to the DOF set
 */
DofsArrayType& GetDofSet()
{
    return mDofSet;
}
/**
 * @brief It allows to get the list of Dofs from the element
 * @return Const reference to the DOF set
 */
const DofsArrayType& GetDofSet() const
{
    return mDofSet;
}
/**
 * @brief Get the lumped mass matrix vector pointer
 * It allows to get the lumped mass matrix vector pointer.
 * Note: may be null before SetUpLumpedMassVector() has been called.
 * @return TSystemVectorPointerType& The lumped mass matrix vector pointer
 */
TSystemVectorPointerType& pGetLumpedMassMatrixVector()
{
    return mpLumpedMassVector;
}
/**
 * @brief Get the lumped mass matrix vector
 * It allows to get the lumped mass matrix vector.
 * Throws if the vector has not been initialized yet.
 * @return TSystemVectorType& The lumped mass matrix vector
 */
TSystemVectorType& GetLumpedMassMatrixVector()
{
    KRATOS_ERROR_IF_NOT(mpLumpedMassVector) << "Lumped mass matrix vector is not initialized!" << std::endl;
    return (*mpLumpedMassVector);
}
/**
 * @brief Function to perform the build of the RHS.
 * The vector could be sized as the total number of dofs or as the number of unrestrained ones
 * @param rModelPart The model part to compute
 */
virtual void BuildRHS(ModelPart& rModelPart)
{
    KRATOS_TRY

    // Build the Right Hand Side without Dirichlet conditions
    // We skip setting the Dirichlet nodes residual to zero for the sake of efficiency
    // Note that this is not required as the Dirichlet conditions are set in the strategy
    BuildRHSNoDirichlet(rModelPart);

    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the build of the RHS without Dirichlet handling.
 * Accumulates the explicit residual contributions of all active elements and
 * conditions. The contributions are assembled into the nodal REACTION
 * variables (see AddExplicitContribution), not into a system vector.
 * @param rModelPart The model part to compute
 */
virtual void BuildRHSNoDirichlet(ModelPart& rModelPart)
{
    KRATOS_TRY

    // Initialize the reaction (residual) to zero before accumulating
    InitializeDofSetReactions();

    // Gets the array of elements and conditions from the model part
    const auto &r_elements_array = rModelPart.Elements();
    const auto &r_conditions_array = rModelPart.Conditions();
    const int n_elems = static_cast<int>(r_elements_array.size());
    const int n_conds = static_cast<int>(r_conditions_array.size());

    const auto& r_process_info = rModelPart.GetProcessInfo();

    #pragma omp parallel firstprivate(n_elems, n_conds)
    {
        #pragma omp for schedule(guided, 512) nowait
        // Assemble all elements
        for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
            auto it_elem = r_elements_array.begin() + i_elem;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            // TODO: We will require to update this as soon as we remove the mIsDefined from the Flags
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE)) {
                element_is_active = it_elem->Is(ACTIVE);
            }
            if (element_is_active) {
                // Calculate elemental explicit residual contribution
                // The explicit builder and solver assumes that the residual contribution is assembled in the REACTION variables
                it_elem->AddExplicitContribution(r_process_info);
            }
        }

        // Assemble all conditions
        #pragma omp for schedule(guided, 512)
        for (int i_cond = 0; i_cond < n_conds; ++i_cond) {
            auto it_cond = r_conditions_array.begin() + i_cond;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            // TODO: We will require to update this as soon as we remove the mIsDefined from the Flags
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE)) {
                condition_is_active = it_cond->Is(ACTIVE);
            }
            if (condition_is_active) {
                // Calculate condition explicit residual contribution
                // The explicit builder and solver assumes that the residual contribution is assembled in the REACTION variables
                it_cond->AddExplicitContribution(r_process_info);
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Applies the constraints.
 * First resets the slave DOFs, then applies the master/slave constraints.
 * @param rModelPart The model part to compute
 */
virtual void ApplyConstraints(ModelPart& rModelPart)
{
    // First we reset the slave dofs
    ConstraintUtilities::ResetSlaveDofs(rModelPart);

    // Now we apply the constraints
    ConstraintUtilities::ApplyConstraints(rModelPart);
}
/**
 * @brief It applies those operations that are expected to be executed once.
 * Sets up the DOF set, the equation ids and the lumped mass vector, skipping
 * whatever is already initialized.
 * @param rModelPart The model part to compute
 */
virtual void Initialize(ModelPart& rModelPart)
{
    if (!mDofSetIsInitialized) {
        // Initialize the DOF set and the equation ids
        this->SetUpDofSet(rModelPart);
        this->SetUpDofSetEquationIds();
        // Set up the lumped mass vector
        this->SetUpLumpedMassVector(rModelPart);
    } else if (!mLumpedMassVectorIsInitialized) {
        // Fixed: stray double semicolon after std::endl removed
        KRATOS_WARNING("ExplicitBuilder") << "Calling Initialize() with already initialized DOF set. Initializing lumped mass vector." << std::endl;
        // Only set up the lumped mass vector
        this->SetUpLumpedMassVector(rModelPart);
    } else {
        KRATOS_WARNING("ExplicitBuilder") << "Calling Initialize() with already initialized DOF set and lumped mass vector." << std::endl;
    }
}
/**
 * @brief It applies certain operations at the system of equations at the begining of the solution step.
 * Rebuilds the DOF set and/or the lumped mass vector if the corresponding
 * reset flags are active (e.g. after topology changes), then zeroes the
 * reactions (residual).
 * @param rModelPart The model part to compute
 */
virtual void InitializeSolutionStep(ModelPart& rModelPart)
{
    // Check the operations that are required to be done
    if (mResetDofSetFlag) {
        // If required (e.g. topology changes) reset the DOF set
        // Note that we also set lumped mass vector in this case
        this->SetUpDofSet(rModelPart);
        this->SetUpDofSetEquationIds();
        this->SetUpLumpedMassVector(rModelPart);
    } else if (mResetLumpedMassVectorFlag) {
        // Only reset the lumped mass vector
        this->SetUpLumpedMassVector(rModelPart);
    }

    // Initialize the reactions (residual)
    this->InitializeDofSetReactions();
}
/**
 * @brief It applies certain operations at the system of equations at the end of the solution step.
 * Computes the reactions if mCalculateReactionsFlag is set.
 * @param rModelPart The model part to compute
 */
virtual void FinalizeSolutionStep(ModelPart& rModelPart)
{
    // If required, calculate the reactions
    if (mCalculateReactionsFlag) {
        this->CalculateReactions();
    }
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory
 * storage not needed (releases the DOF set and the lumped mass vector).
 */
virtual void Clear()
{
    this->mDofSet = DofsArrayType();
    this->mpLumpedMassVector.reset();

    KRATOS_INFO_IF("ExplicitBuilder", this->GetEchoLevel() > 0) << "Clear Function called" << std::endl;
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @param rModelPart The model part to compute
 * @return 0 all ok (no checks are currently performed at this base-class level)
 */
virtual int Check(const ModelPart& rModelPart) const
{
    KRATOS_TRY

    return 0;

    KRATOS_CATCH("");
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @return The default parameters
 */
virtual Parameters GetDefaultParameters() const
{
    const Parameters default_parameters = Parameters(R"(
{
"name" : "explicit_builder"
})");
    return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class
 */
static std::string Name()
{
    return "explicit_builder";
}
/**
 * @brief It sets the level of echo for the solving strategy
 * @param Level The level to set
 * @details The different levels of echo are:
 * - 0: Mute... no echo at all
 * - 1: Printing time and basic informations
 * - 2: Printing linear solver data
 * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b...
 * - 4: Print of stiffness matrix, b to Matrix Market
 */
void SetEchoLevel(int Level)
{
    mEchoLevel = Level;
}
/**
 * @brief It returns the echo level
 * @return The echo level of the builder and solver
 */
int GetEchoLevel() const
{
    return mEchoLevel;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
    return "ExplicitBuilder";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << Info();
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
DofsArrayType mDofSet; /// The set containing the DoF of the system
TSystemVectorPointerType mpLumpedMassVector; // The lumped mass vector associated to the DOF set
bool mResetDofSetFlag = false; /// If the DOF set requires to be set at each time step
bool mResetLumpedMassVectorFlag = false; /// If the lumped mass vector requires to be set at each time step
bool mDofSetIsInitialized = false; /// Flag taking care if the dof set was initialized ot not
bool mLumpedMassVectorIsInitialized = false; /// Flag taking care if the lumped mass vector was initialized or not
bool mCalculateReactionsFlag = false; /// Flag taking in account if it is needed or not to calculate the reactions
unsigned int mEquationSystemSize; /// Number of degrees of freedom of the problem to be solve
int mEchoLevel = 0;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs.
* @details The list of dofs is stores insde the ExplicitBuilder as it is closely connected to the way the matrix and RHS are built
* @param rModelPart The model part to compute
*/
virtual void SetUpDofSet(const ModelPart& rModelPart)
{
KRATOS_TRY;
KRATOS_INFO_IF("ExplicitBuilder", this->GetEchoLevel() > 1) << "Setting up the dofs" << std::endl;
// Gets the array of elements, conditions and constraints from the modeler
const auto &r_elements_array = rModelPart.Elements();
const auto &r_conditions_array = rModelPart.Conditions();
const auto &r_constraints_array = rModelPart.MasterSlaveConstraints();
const int n_elems = static_cast<int>(r_elements_array.size());
const int n_conds = static_cast<int>(r_conditions_array.size());
const int n_constraints = static_cast<int>(r_constraints_array.size());
// Global dof set
DofSetType dof_global_set;
dof_global_set.reserve(n_elems*20);
// Auxiliary DOFs list
DofsVectorType dof_list;
DofsVectorType second_dof_list; // The second_dof_list is only used on constraints to include master/slave relations
#pragma omp parallel firstprivate(dof_list, second_dof_list)
{
const auto& r_process_info = rModelPart.GetProcessInfo();
// We cleate the temporal set and we reserve some space on them
DofSetType dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// Get the DOFs list from each element and insert it in the temporary set
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
const auto it_elem = r_elements_array.begin() + i_elem;
it_elem->GetDofList(dof_list, r_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Get the DOFs list from each condition and insert it in the temporary set
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond < n_conds; ++i_cond) {
const auto it_cond = r_conditions_array.begin() + i_cond;
it_cond->GetDofList(dof_list, r_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Get the DOFs list from each constraint and insert it in the temporary set
#pragma omp for schedule(guided, 512) nowait
for (int i_const = 0; i_const < n_constraints; ++i_const) {
auto it_const = r_constraints_array.begin() + i_const;
it_const->GetDofList(dof_list, second_dof_list, r_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
}
// We merge all the sets in one thread
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ExplicitBuilder", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
// Ordering the global DOF set
mDofSet = DofsArrayType();
DofsArrayType temp_dof_set;
temp_dof_set.reserve(dof_global_set.size());
for (auto it_dof = dof_global_set.begin(); it_dof != dof_global_set.end(); ++it_dof) {
temp_dof_set.push_back(*it_dof);
}
temp_dof_set.Sort();
mDofSet = temp_dof_set;
mEquationSystemSize = mDofSet.size();
// DoFs set checks
// Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
// Check if each DOF has a reaction. Note that the explicit residual is stored in these
for (auto it_dof = mDofSet.begin(); it_dof != mDofSet.end(); ++it_dof) {
KRATOS_ERROR_IF_NOT(it_dof->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << it_dof->Id() << std::endl
<< "Dof : " << (*it_dof) << std::endl << "Not possible to calculate reactions." << std::endl;
}
mDofSetIsInitialized = true;
KRATOS_INFO_IF("ExplicitBuilder", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << mDofSet.size() << std::endl;
KRATOS_INFO_IF("ExplicitBuilder", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
KRATOS_INFO_IF("ExplicitBuilder", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
KRATOS_CATCH("");
}
/**
 * @brief Assigns the equation ids. to the DOF set
 * Each DOF in the (already built and sorted) DOF set receives an equation
 * id. equal to its position within the set. Runs in parallel over the DOFs.
 */
virtual void SetUpDofSetEquationIds()
{
    // A valid, non-empty DOF set is required before any id. can be assigned
    KRATOS_ERROR_IF_NOT(mDofSetIsInitialized) << "Trying to set the equation ids. before initializing the DOF set. Please call the SetUpDofSet() before." << std::endl;
    KRATOS_ERROR_IF(mEquationSystemSize == 0) << "Trying to set the equation ids. in an empty DOF set (equation system size is 0)." << std::endl;
    // The equation id. is simply the DOF position within the sorted set
    IndexPartition<int>(mEquationSystemSize).for_each(
        [&](int IndexDof){
            auto it_current_dof = mDofSet.begin() + IndexDof;
            it_current_dof->SetEquationId(IndexDof);
        }
    );
}
/**
 * @brief Set the Up Lumped Mass Vector object
 * This method sets up the lumped mass vector used in the explicit update.
 * Note that it requires that the equation ids. are already set (the vector
 * is indexed by equation id.) and the implementation of the mass
 * contributions to be done in the element level in the
 * CalculateLumpedMassVector method.
 * @param rModelPart The model part to compute
 */
virtual void SetUpLumpedMassVector(const ModelPart &rModelPart)
{
    KRATOS_TRY;
    KRATOS_INFO_IF("ExplicitBuilder", this->GetEchoLevel() > 1) << "Setting up the lumped mass matrix vector" << std::endl;
    // Initialize the lumped mass matrix vector
    // Note that the lumped mass matrix vector size matches the dof set one
    mpLumpedMassVector = TSystemVectorPointerType(new TSystemVectorType(GetDofSet().size()));
    TDenseSpace::SetToZero(*mpLumpedMassVector);
    // Loop the elements to get the lumped mass vector
    LocalSystemVectorType elem_mass_vector;
    std::vector<std::size_t> elem_equation_id;
    const auto &r_elements_array = rModelPart.Elements();
    const auto &r_process_info = rModelPart.GetProcessInfo();
    const int n_elems = static_cast<int>(r_elements_array.size());
    // NOTE(fix): the previous version used an orphaned "#pragma omp for ... nowait"
    // (no enclosing parallel region) and left elem_equation_id shared, which is a
    // data race as soon as the loop actually runs in parallel (the AtomicAdd below
    // shows parallel execution is intended). A combined "parallel for" with both
    // per-thread work arrays private is used instead; "nowait" is dropped since it
    // is not permitted on a combined parallel worksharing construct.
    #pragma omp parallel for private(elem_mass_vector, elem_equation_id) schedule(guided, 512)
    for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
        const auto it_elem = r_elements_array.begin() + i_elem;
        // Calculate the elemental lumped mass vector
        it_elem->CalculateLumpedMassVector(elem_mass_vector, r_process_info);
        it_elem->EquationIdVector(elem_equation_id, r_process_info);
        // Update value of lumped mass vector (atomic: several elements may
        // contribute to the same DOF concurrently)
        for (IndexType i = 0; i < elem_equation_id.size(); ++i) {
            AtomicAdd((*mpLumpedMassVector)[elem_equation_id[i]], elem_mass_vector(i));
        }
    }
    // Set the lumped mass vector flag as true
    mLumpedMassVectorIsInitialized = true;
    KRATOS_CATCH("");
}
/**
* @brief Initialize the DOF set reactions
* For an already initialized dof set (mDofSet), this method sets to
* zero the corresponding reaction variable values. Note that in the
* explicit build the reactions are used as residual container.
*/
virtual void InitializeDofSetReactions()
{
// Firstly check that the DOF set is initialized
KRATOS_ERROR_IF_NOT(mDofSetIsInitialized) << "Trying to initialize the explicit residual but the DOFs set is not initialized yet." << std::endl;
KRATOS_ERROR_IF(mEquationSystemSize == 0) << "Trying to set the equation ids. in an empty DOF set (equation system size is 0)." << std::endl;
// Loop the reactions to initialize them to zero
block_for_each(
mDofSet,
[](DofType& rDof){
rDof.GetSolutionStepReactionValue() = 0.0;
}
);
}
/**
* @brief It computes the reactions of the system
* @param rModelPart The model part to compute
*/
virtual void CalculateReactions()
{
if (mCalculateReactionsFlag) {
// Firstly check that the DOF set is initialized
KRATOS_ERROR_IF_NOT(mDofSetIsInitialized) << "Trying to initialize the explicit residual but the DOFs set is not initialized yet." << std::endl;
KRATOS_ERROR_IF(mEquationSystemSize == 0) << "Trying to set the equation ids. in an empty DOF set (equation system size is 0)." << std::endl;
// Calculate the reactions as minus the current value
// Note that we take advantage of the fact that Kratos always works with a residual based formulation
// This means that the reactions are minus the residual. As we use the reaction as residual container
// during the explicit resolution of the problem, the calculate reactions is as easy as switching the sign
block_for_each(
mDofSet,
[](DofType& rDof){
auto& r_reaction_value = rDof.GetSolutionStepReactionValue();
r_reaction_value *= -1.0;
}
);
}
}
/**
 * @brief This method validates and assigns default parameters
 * Missing entries in the input settings are filled in from the defaults
 * via Parameters::ValidateAndAssignDefaults.
 * @param ThisParameters Parameters to be validated (taken by value; whether the
 * caller's object is also updated depends on the Parameters class copy
 * semantics — NOTE(review): confirm against the Parameters implementation)
 * @param DefaultParameters The default parameters to merge in
 * @return Returns the validated Parameters
 */
virtual Parameters ValidateAndAssignParameters(
Parameters ThisParameters,
const Parameters DefaultParameters
) const
{
ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
return ThisParameters;
}
/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 * @note Intentionally empty in this base class: derived builders override it
 * to read their specific settings. No state is modified here.
 */
virtual void AssignSettings(const Parameters ThisParameters)
{
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
}; /* Class ExplicitBuilder */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_EXPLICIT_BUILDER defined */
|
interpolation_p1.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise linear prolongation of a single block: each coarse cell is
// expanded into 2x2x2 fine cells. Fine values are a weighted combination of
// the parent coarse cell and its three neighbors toward the fine point
// (weights 27/64, 9/64, 9/64, 9/64, 3/64, 3/64, 3/64, 1/64 = the 0.421875 /
// 0.140625 / 0.046875 / 0.015625 constants below). prescale_f scales the
// existing fine data before accumulation (0.0 when packing MPI buffers).
static inline void interpolation_p1_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
// default to the raw buffer pointers (used for MPI pack/unpack blocks);
// a box id >= 0 overrides them with the corresponding box interior below
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
if(block->read.box >=0){
read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
}
int i,j,k;
// delta_* select the neighboring coarse cell nearest to the fine point:
// even fine indices look backwards, odd ones look forwards (see comment below)
for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
for(i=0;i<write_dim_i;i++){int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | o | o |
// +---+---+---+---+
// | | x | x | |
//
// CAREFUL !!! you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
// piecewise linear interpolation... NOTE, BC's must have been previously applied
write[write_ijk] = prescale_f*write[write_ijk] +
0.421875*read[read_ijk ] +
0.140625*read[read_ijk +delta_k] +
0.140625*read[read_ijk +delta_j ] +
0.046875*read[read_ijk +delta_j+delta_k] +
0.140625*read[read_ijk+delta_i ] +
0.046875*read[read_ijk+delta_i +delta_k] +
0.046875*read[read_ijk+delta_i+delta_j ] +
0.015625*read[read_ijk+delta_i+delta_j+delta_k];
}}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Perform an inter-level piecewise linear interpolation from coarse level_c
// (vector id_c) into fine level_f (vector id_f), scaling the existing fine
// data by prescale_f. Ordering: prepost Irecvs, pack+Isend coarse data owned
// by remote fine boxes, interpolate locally (overlapped with the sends),
// wait, then unpack received buffers into the fine boxes.
void interpolation_p1(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
// ghost exchange + boundary conditions on the coarse grid must precede the
// stencil in interpolation_p1_block, which reads neighbor coarse cells
exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
apply_BCs_p1(level_c,id_c,STENCIL_SHAPE_BOX);
double _timeCommunicationStart = getTime();
double _timeStart,_timeEnd;
int buffer=0;
int n;
int my_tag = (level_f->tag<<4) | 0x7;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
if(level_f->interpolation.num_recvs>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
}
// pack MPI send buffers... (block list [0] = coarse data destined for other ranks)
if(level_c->interpolation.num_blocks[0]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
// !!! prescale==0 because you don't want to increment the MPI buffer
interpolation_p1_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
}
// loop through MPI send buffers and post Isend's...
if(level_c->interpolation.num_sends>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_send += (_timeEnd-_timeStart);
}
#endif
// perform local interpolation... try and hide within Isend latency... (block list [1] = local coarse->fine copies)
if(level_c->interpolation.num_blocks[1]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
interpolation_p1_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_local += (_timeEnd-_timeStart);
}
// wait for MPI to finish...
#ifdef USE_MPI
if(nMessages>0){
_timeStart = getTime();
MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = getTime();
level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
}
// unpack MPI receive buffers (block list [2] = received data -> fine boxes)
if(level_f->interpolation.num_blocks[2]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
}
#endif
level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
|
GB_unaryop__lnot_uint16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_fp64
// op(A') function: GB_tran__lnot_uint16_fp64
// C type: uint16_t
// A type: double
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise over a dense array:
// Cx [p] = !(((uint16_t) Ax [p]) != 0) for p = 0..anz-1, where the cast uses
// GB_CAST_UNSIGNED (see the GB_CASTING macro above). Embarrassingly parallel.
GrB_Info GB_unop__lnot_uint16_fp64
(
uint16_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination is compiled out (see GB_DISABLE above);
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p])), expanded from the GB_CAST_OP macro above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply loop itself comes from
// including GB_unaryop_transpose.c, specialized by the macros defined above.
// Rowcounts, Iter, A_slice and naslice describe the parallel slicing of A.
GrB_Info GB_tran__lnot_uint16_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic transpose kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_dense_ewise3_noaccum_template.c | //------------------------------------------------------------------------------
// GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_unused.h"
// Template body (textually included by type-specialized files; GB_ATYPE,
// GB_BTYPE, GB_CTYPE, GB_GETA, GB_GETB, GB_BINOP and nthreads are supplied
// by the includer). Computes C = A+B entrywise for three dense matrices,
// special-casing C aliased to A or B so a CBLAS axpy can be used.
{
//--------------------------------------------------------------------------
// get A, B, and C
//--------------------------------------------------------------------------
// any matrix may be aliased to any other (C==A, C==B, and/or A==B)
GB_ATYPE *Ax = (GB_ATYPE *) A->x ;
GB_BTYPE *Bx = (GB_BTYPE *) B->x ;
GB_CTYPE *Cx = (GB_CTYPE *) C->x ;
const int64_t cnz = GB_NNZ (C) ;
ASSERT (GB_is_dense (A)) ;
ASSERT (GB_is_dense (B)) ;
ASSERT (GB_is_dense (C)) ;
int64_t p ;
//--------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense
//--------------------------------------------------------------------------
#if GB_CTYPE_IS_BTYPE
if (C == B)
{
//----------------------------------------------------------------------
// C = A+C where A and C are dense
//----------------------------------------------------------------------
// C and B cannot be aliased if their types differ
#if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL
// C += A via GB_cblas_saxpy or GB_cblas_daxpy
GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Ax, Cx, nthreads) ; // C += A
#elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL
// C -= A via GB_cblas_saxpy or GB_cblas_daxpy
GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Ax, Cx, nthreads) ; // C -= A
#else
// generic fallback: entrywise Cx [p] = op (Ax [p], Cx [p])
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
GB_GETA (aij, Ax, p) ; // aij = Ax [p]
GB_BINOP (GB_CX (p), aij, GB_CX (p)) ; // Cx [p] = aij + Cx [p]
}
#endif
}
else
#endif
#if GB_CTYPE_IS_ATYPE
if (C == A)
{
//----------------------------------------------------------------------
// C = C+B where B and C are dense
//----------------------------------------------------------------------
#if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL
// C += B via GB_cblas_saxpy or GB_cblas_daxpy
GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B
#elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL
// C -= B via GB_cblas_saxpy or GB_cblas_daxpy
GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B
#else
// generic fallback: entrywise Cx [p] = op (Cx [p], Bx [p])
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
GB_GETB (bij, Bx, p) ; // bij = Bx [p]
GB_BINOP (GB_CX (p), GB_CX (p), bij) ; // Cx [p] += bij
}
#endif
}
else
#endif
{
//----------------------------------------------------------------------
// C = A+B where all 3 matrices are dense
//----------------------------------------------------------------------
// note that A and B may still be aliased to each other
#if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL
// C = A+B via GB_cblas_saxpy or GB_cblas_daxpy
GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A
GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B
#elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL
// C = A-B via GB_cblas_saxpy or GB_cblas_daxpy
GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A
GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B
#else
// generic fallback: entrywise Cx [p] = op (Ax [p], Bx [p])
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
GB_GETA (aij, Ax, p) ; // aij = Ax [p]
GB_GETB (bij, Bx, p) ; // bij = Bx [p]
GB_BINOP (GB_CX (p), aij, bij) ; // Cx [p] = aij + bij
}
#endif
}
}
|
GB_unaryop__abs_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_bool
// op(A') function: GB_tran__abs_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise over a dense array:
// Cx [p] = (bool) Ax [p] for p = 0..anz-1 (abs of a bool is the identity,
// see the GB_OP macro above). Embarrassingly parallel.
GrB_Info GB_unop__abs_bool_bool
(
bool *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination is compiled out (see GB_DISABLE above);
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p])), expanded from the GB_CAST_OP macro above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply loop itself comes from
// including GB_unaryop_transpose.c, specialized by the macros defined above.
// Rowcounts, Iter, A_slice and naslice describe the parallel slicing of A.
GrB_Info GB_tran__abs_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic transpose kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)4 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
float* g00 = g0.row(p / 8);
g00[0] = k00[0];
g00[1] = k10[0];
g00[2] = k20[0];
g00[3] = k30[0];
g00[4] = k40[0];
g00[5] = k50[0];
g00[6] = k60[0];
g00[7] = k70[0];
g00 += 8;
g00[0] = k01[0];
g00[1] = k11[0];
g00[2] = k21[0];
g00[3] = k31[0];
g00[4] = k41[0];
g00[5] = k51[0];
g00[6] = k61[0];
g00[7] = k71[0];
g00 += 8;
g00[0] = k02[0];
g00[1] = k12[0];
g00[2] = k22[0];
g00[3] = k32[0];
g00[4] = k42[0];
g00[5] = k52[0];
g00[6] = k62[0];
g00[7] = k72[0];
g00 += 8;
g00[0] = k03[0];
g00[1] = k13[0];
g00[2] = k23[0];
g00[3] = k33[0];
g00[4] = k43[0];
g00[5] = k53[0];
g00[6] = k63[0];
g00[7] = k73[0];
g00 += 8;
g00[0] = k04[0];
g00[1] = k14[0];
g00[2] = k24[0];
g00[3] = k34[0];
g00[4] = k44[0];
g00[5] = k54[0];
g00[6] = k64[0];
g00[7] = k74[0];
g00 += 8;
g00[0] = k05[0];
g00[1] = k15[0];
g00[2] = k25[0];
g00[3] = k35[0];
g00[4] = k45[0];
g00[5] = k55[0];
g00[6] = k65[0];
g00[7] = k75[0];
g00 += 8;
g00[0] = k06[0];
g00[1] = k16[0];
g00[2] = k26[0];
g00[3] = k36[0];
g00[4] = k46[0];
g00[5] = k56[0];
g00[6] = k66[0];
g00[7] = k76[0];
g00 += 8;
g00[0] = k07[0];
g00[1] = k17[0];
g00[2] = k27[0];
g00[3] = k37[0];
g00[4] = k47[0];
g00[5] = k57[0];
g00[6] = k67[0];
g00[7] = k77[0];
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
_mm256_fmadd_ps4(_sum0, _w0, _w1, _w2, _w3, _val00, _val01, _val02, _val03);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
_mm256_fmadd_ps4(_sum0, _w4, _w5, _w6, _w7, _val04, _val05, _val06, _val07);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
_mm256_fmadd_ps4(_sum1, _w0, _w1, _w2, _w3, _val10, _val11, _val12, _val13);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_mm256_fmadd_ps4(_sum1, _w4, _w5, _w6, _w7, _val14, _val15, _val16, _val17);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
_mm256_fmadd_ps4(_sum2, _w0, _w1, _w2, _w3, _val20, _val21, _val22, _val23);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
_mm256_fmadd_ps4(_sum2, _w4, _w5, _w6, _w7, _val24, _val25, _val26, _val27);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
_mm256_fmadd_ps4(_sum3, _w0, _w1, _w2, _w3, _val30, _val31, _val32, _val33);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_mm256_fmadd_ps4(_sum3, _w4, _w5, _w6, _w7, _val34, _val35, _val36, _val37);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
_mm256_fmadd_ps4(_sum4, _w0, _w1, _w2, _w3, _val40, _val41, _val42, _val43);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
_mm256_fmadd_ps4(_sum4, _w4, _w5, _w6, _w7, _val44, _val45, _val46, _val47);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
_mm256_fmadd_ps4(_sum5, _w0, _w1, _w2, _w3, _val50, _val51, _val52, _val53);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_mm256_fmadd_ps4(_sum5, _w4, _w5, _w6, _w7, _val54, _val55, _val56, _val57);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
_mm256_fmadd_ps4(_sum6, _w0, _w1, _w2, _w3, _val60, _val61, _val62, _val63);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
_mm256_fmadd_ps4(_sum6, _w4, _w5, _w6, _w7, _val64, _val65, _val66, _val67);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
_mm256_fmadd_ps4(_sum7, _w0, _w1, _w2, _w3, _val70, _val71, _val72, _val73);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_mm256_fmadd_ps4(_sum7, _w4, _w5, _w6, _w7, _val74, _val75, _val76, _val77);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
_mm256_fmadd_ps4(_sum8, _w0, _w1, _w2, _w3, _val80, _val81, _val82, _val83);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
_mm256_fmadd_ps4(_sum8, _w4, _w5, _w6, _w7, _val84, _val85, _val86, _val87);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
_mm256_fmadd_ps4(_sum9, _w0, _w1, _w2, _w3, _val90, _val91, _val92, _val93);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_mm256_fmadd_ps4(_sum9, _w4, _w5, _w6, _w7, _val94, _val95, _val96, _val97);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
_mm256_fmadd_ps4(_sum10, _w0, _w1, _w2, _w3, _val100, _val101, _val102, _val103);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
_mm256_fmadd_ps4(_sum10, _w4, _w5, _w6, _w7, _val104, _val105, _val106, _val107);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
_mm256_fmadd_ps4(_sum11, _w0, _w1, _w2, _w3, _val110, _val111, _val112, _val113);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_mm256_fmadd_ps4(_sum11, _w4, _w5, _w6, _w7, _val114, _val115, _val116, _val117);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum = _mm256_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
// 1x1 convolution with stride 2 on 8-wide packed float data (AVX).
// Strategy: gather every second pixel of every input channel into a
// dense stride-1 image of size outw x outh, then delegate to the
// stride-1 sgemm kernel.
static void conv1x1s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const size_t elemsize = bottom_blob.elemsize;
    const int elempack = bottom_blob.elempack;

    const int outw = top_blob.w;
    const int outh = top_blob.h;

    // Floats to skip after the last sampled pixel of a row to reach the
    // first sampled pixel of the next row (each pixel holds 8 floats).
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* sptr = bottom_blob.channel(q);
        float* dptr = bottom_blob_shrinked.channel(q);

        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                // Copy one packed pixel, then hop over its neighbour.
                _mm256_storeu_ps(dptr, _mm256_loadu_ps(sptr));
                sptr += 16; // 2 pixels * 8 floats (horizontal stride 2)
                dptr += 8;
            }
            sptr += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
/*
  Pairs the two results of an integer division: the quotient and the
  remainder.  NOTE(review): presumably filled in by a floor-style modulo
  helper used by the virtual pixel methods -- confirm against its callers,
  which are outside this view.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Allocate and zero-initialize a new CacheInfo and return it as an opaque
  Cache handle.  The cache starts with no backing store (UndefinedCache),
  read/write mode, and a per-thread nexus array sized to the largest of
  the requested thread count, the OpenMP maximum, and the ThreadResource
  limit (never less than one).  Any allocation failure is fatal.
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
/* Defaults: no backing store yet, read/write mode, sRGB colorspace. */
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);  /* no disk-cache file descriptor open yet */
cache_info->id=GetMagickThreadId();
/*
  Raise the thread count to the largest of the caller's request, the
  OpenMP maximum, and the ThreadResource limit; floor it at one.
*/
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/*
  Synchronize preference: the environment variable is consulted first and
  the policy value second, so a configured policy overrides the
  environment.
*/
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
/* Clamp geometry limits so extents always fit in a signed ssize_t. */
cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
(MagickSizeType) MAGICK_SSIZE_MAX);
cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
(MagickSizeType) MAGICK_SSIZE_MAX);
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Allocate 2*number_threads NexusInfo structures in one flat backing
  array.  The first number_threads pointer-array entries are the primary
  per-thread nexuses; each of those is paired with a virtual nexus drawn
  from the second half of the backing array.  Allocation failures are
  fatal.
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
ssize_t
i;
/* Both the pointer array and the flat struct array are 2x-sized. */
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
2*sizeof(**nexus_info));
if (*nexus_info == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) (2*number_threads); i++)
{
/* Point each slot at its struct; pair the first half with the second. */
nexus_info[i]=(*nexus_info+i);
if (i < (ssize_t) number_threads)
nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a pointer to the cache's in-core pixels and store their byte
  length in *length.  Only Memory and Map caches expose their pixels
  directly; for every other cache type *length is 0 and NULL is returned.
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  Instantiate the cache component: lazily create the module-wide cache
  semaphore if it does not exist yet.  Always reports success.
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
/*
  Destroy the cache component.  If the module semaphore was never created
  (genesis not run), activate it first so the relinquish below always
  operates on a valid semaphore.
*/
MagickPrivate void CacheComponentTerminus(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.  Blend the nexus pixels (q) against the underlying
    authentic cache pixels (p) per the image's write-mask channel; returns
    MagickTrue when clipping was applied or is unnecessary, MagickFalse if
    the underlying pixels cannot be accessed.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the authentic pixels under the region; q walks the nexus */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      /* mask_alpha is the normalized write-mask value for this pixel;
         a (near-)zero mask leaves the nexus pixel untouched */
      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;  /* only blend channels marked for update */
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Acquire a fresh, empty pixel cache that inherits the source's
    virtual pixel method; no pixel data is copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info,
    *magick_restrict source_info;

  /*
    Copy the cache method table from cache into clone.
  */
  assert(clone != (Cache) NULL);
  clone_info=(CacheInfo *) clone;
  assert(clone_info->signature == MagickCoreSignature);
  if (clone_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      clone_info->filename);
  assert(cache != (Cache) NULL);
  source_info=(CacheInfo *) cache;
  assert(source_info->signature == MagickCoreSignature);
  clone_info->methods=source_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.  Copies the whole
    backing file of cache_info into clone_info, preferring sendfile(2)
    where available and falling back to a buffered read/write loop.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /* sendfile(2) transfers at most 0x7ffff000 bytes per call, so only
         attempt the single-call fast path for caches below that limit */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          /* short or failed sendfile: rewind both files and fall back to
             the buffered copy loop below */
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: abort; the extent check below reports failure */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* success only if exactly the expected number of bytes was copied */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Clone the pixel repository (and metacontent) of cache_info into
    clone_info.  Identical morphology takes a fast block-copy or on-disk
    copy path; otherwise pixels are transferred row-by-row through
    per-thread nexuses, remapping channels where the maps differ.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* a ping cache has no pixels to clone */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* both caches are addressable in memory: one flat copy suffices */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: the channel maps agree, so each row can be block-copied */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  /* length: quantums per row common to both caches */
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed: skip remaining work */
    if (y >= (ssize_t) clone_info->rows)
      continue;  /* clone is shorter than source: drop extra rows */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    /* zero the destination row so channels absent in the source stay 0 */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;  /* clone is narrower than source: truncate the row */
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            /* look up where this clone channel lives in the source pixel */
            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache reference, if one is attached.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate the image pixel cache, honoring a registered destroy
    handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  /*
    Close the disk-backed pixel cache file and return its file resource.
  */
  if (cache_info->file == -1)
    return(MagickFalse);  /* no file was open */
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage backing the cache according to its type,
    then reset the cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* pixels are owned by the OpenCL cache info: release through it */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above -- MapCache falls through into DiskCache,
       presumably intentional since a map cache is file-backed and its file
       descriptor still needs closing; confirm against upstream history */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* mark the cache empty regardless of which path released the pixels */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the pixel cache; the cache and its ancillary
    structures are deallocated only when the count reaches zero.  Always
    returns (Cache) NULL so callers can overwrite their pointer.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* other references remain: leave the cache intact */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* release pixel storage first, then the ancillary structures */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the nexus staging buffer (heap or memory-mapped) and clear all
    pixel bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  /*
    Destroy the pixel cache nexus array.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  /* 2*number_threads entries are released -- presumably the allocator
     creates an authentic and a virtual nexus per thread; confirm against
     AcquirePixelCacheNexus */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /* the NexusInfo structs appear to live in one contiguous allocation
     anchored at nexus_info[0]; free it, then the pointer array itself */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Return the metacontent staged by the last authentic-pixel request,
    delegating to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      id=GetOpenMPThreadId();
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Default handler: hand back the metacontent held by this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a retained OpenCL buffer wrapping the image's in-memory pixels,
    or NULL when the cache cannot be exposed to OpenCL.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /* force a private, materialized cache before sharing it with OpenCL */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);  /* only heap-backed memory caches qualify */
  LockSemaphoreInfo(cache_info->semaphore);
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);  /* caller releases */
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Queue the requested region, then fault pixels (and metacontent, when
    present) in from the pixel store unless the nexus already addresses
    the authentic cache directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Default handler: return the pixels staged in this thread's cache
    nexus by the previous queue/get request.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Return the authentic pixels from the most recent queue/get request,
    delegating to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      id=GetOpenMPThreadId();
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Obtain a read/write pixel region, delegating to a registered handler
    when one is installed; otherwise service it via this thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      id=GetOpenMPThreadId();
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Default handler: fetch the region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickSizeType
    extent;

  /*
    Extent (pixel count) of the region selected by the most recent pixel
    request on the calling thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  extent=GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Confirm the image and its pixel cache agree on storage class,
    colorspace, geometry, and channel layout.  Any mismatch means the
    cache must be reopened before use.
  */
  cache_info=(const CacheInfo *) image->cache;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels))
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (image->metacontent_extent != cache_info->metacontent_extent))
    return(MagickFalse);
  /*
    The per-channel maps must match entry-for-entry.
  */
  if (memcmp(image->channel_map,cache_info->channel_map,
       image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  /*
    Return a pixel cache the caller may modify: if the current cache is
    shared (reference_count > 1) or opened read-only, image->cache is
    first replaced with a private clone (copy-on-write).  Returns NULL
    on failure.
  */
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    destroy,
    status;
  /*
    Function-local statics: resource limits are sampled once and reused
    across calls.  NOTE(review): updates to these (and to the file-scope
    cache_epoch) are unsynchronized -- presumably a benign race; confirm.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;
  status=MagickTrue;
  /*
    Honor the ThrottleResource limit: delay on every 32nd call.
  */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /*
    Fail hard once the TimeResource limit has elapsed, closing any
    on-disk cache file first.
  */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Double-checked locking: re-test the share condition after acquiring
    the cache semaphore.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;
          Image
            clone_image;
          /*
            Clone pixel cache.  A stack copy of the image carries its own
            semaphore so OpenPixelCache() operates on the clone only.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy the pixel data too when the caller asked for a clone */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* success: swap the private clone into the image */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference to the old cache, now replaced by the clone */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          /* release the disk-cache file handle after reopening */
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Report the caching strategy (memory, map, disk, ping, ...) backing
    this image's pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    channel;

  /*
    Copy one pixel into a caller-supplied buffer, mapping each cache
    channel to its canonical slot.  On a NULL source, substitute the
    image background color and return MagickFalse.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
    destination[GetPixelChannelChannel(image,channel)]=source[channel];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  /*
    Fetch a single writable pixel into the caller's MaxPixelChannels
    buffer; dispatch to the installed handler when one exists.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict p;

  /*
    Default one-pixel authentic handler: resolve the pixel through this
    thread's cache nexus and copy it out.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  /*
    Read one virtual pixel (out-of-bounds coordinates are resolved by
    the image's virtual-pixel method) into the caller's buffer.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict q;

  /*
    Default one-pixel virtual handler: resolve the pixel through this
    thread's cache nexus using the requested virtual-pixel method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict q;

  /*
    Read one virtual pixel and expand it into a PixelInfo.  On failure
    MagickFalse is returned and *pixel holds the GetPixelInfo() defaults.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (q == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,q,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Colorspace recorded in the pixel-cache properties.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Path of the file backing the pixel cache (meaningful for disk
    caches).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate a CacheMethods structure with the default (in-core) pixel
    cache handlers; any handler left unset by memset stays NULL.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) pixel access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* authentic (read/write) pixel access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* queue/sync/teardown */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Pixel count of the nexus region; an empty region reports the full
    cache extent instead.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *magick_unused(exception))
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the raw pixel store when it resides in addressable memory
    (heap or memory-mapped); disk and ping caches yield NULL.  *length
    always receives the cache length in bytes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  magick_unreferenced(exception);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    DirectClass or PseudoClass, as recorded in the cache properties.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    budget;

  /*
    Compute a square tile whose row fits a fixed byte budget: 2048 bytes
    for in-memory caches, 8192 bytes for disk-backed caches.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  budget=GetImagePixelCacheType(image) == DiskCache ? 8192UL : 2048UL;
  *width=budget/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Policy applied to pixel requests that fall outside the image
    boundaries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default handler: meta-content associated with the calling thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Meta-content for the given nexus; an undefined cache carries none.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  /*
    Prefer the installed meta-content handler; fall back to this
    thread's cache nexus when the handler yields nothing.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent == (void *) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      metacontent=GetVirtualMetacontentFromNexus(cache_info,
        cache_info->nexus_info[id]);
    }
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither matrix: DitherX()/DitherY() add these offsets
  (re-centered by subtracting 32) to jitter out-of-range coordinates for
  the dither virtual-pixel methods.
*/
static ssize_t
  DitherMatrix[64] =
  {
    0, 48, 12, 60, 3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
    8, 56, 4, 52, 11, 59, 7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
    2, 50, 14, 62, 1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58, 6, 54, 9, 57, 5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    jittered;

  /*
    Offset x by the ordered-dither kernel, then clamp into
    [0, columns-1].
  */
  jittered=x+DitherMatrix[x & 0x07]-32L;
  if (jittered < 0L)
    jittered=0L;
  else
    if (jittered >= (ssize_t) columns)
      jittered=(ssize_t) columns-1L;
  return(jittered);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    jittered;

  /*
    Offset y by the ordered-dither kernel, then clamp into [0, rows-1].
  */
  jittered=y+DitherMatrix[y & 0x07]-32L;
  if (jittered < 0L)
    jittered=0L;
  else
    if (jittered >= (ssize_t) rows)
      jittered=(ssize_t) rows-1L;
  return(jittered);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into the valid column range [0, columns-1] (edge
    virtual-pixel policy).
  */
  if (x < 0L)
    return(0L);
  if (x < (ssize_t) columns)
    return(x);
  return((ssize_t) (columns-1));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into the valid row range [0, rows-1] (edge virtual-pixel
    policy).
  */
  if (y < 0L)
    return(0L);
  if (y < (ssize_t) rows)
    return(y);
  return((ssize_t) (rows-1));
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /*
    Pseudo-random column index for the random virtual-pixel method.
  */
  return((ssize_t) (GetPseudoRandomValue(random_info)*columns));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /*
    Pseudo-random row index for the random virtual-pixel method.
  */
  return((ssize_t) (GetPseudoRandomValue(random_info)*rows));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division of offset by extent: quotient selects the tile and
    remainder the coordinate within it, with the remainder adjusted to
    be non-negative (unlike C's truncating '/' and '%').

    Bug fix: the original guarded only the quotient against a zero
    extent and still evaluated offset % extent unconditionally, which is
    undefined behavior (division by zero) when extent == 0.  Both the
    quotient and remainder are now computed only for a non-zero extent;
    a zero extent yields quotient=offset, remainder=0, matching the
    original's intended fallback.
  */
  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}
/*
  GetVirtualPixelCacheNexus() transfers the requested pixel region into the
  given cache nexus, synthesizing any pixels that fall outside the cache
  extents according to the virtual pixel method.  Returns a pointer to the
  nexus pixels, or NULL on failure.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Initialize the constant virtual pixel used by these methods.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Remaining methods use the image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /*
                Odd tile instances are reflected about the image edge.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            Bug fix: copy metacontent_extent bytes per pixel in the run; the
            original copied only "length" bytes yet advanced s by the full
            length*metacontent_extent, leaving stale metacontent whenever the
            per-pixel extent exceeds one byte (cf. the single-pixel branch).
          */
          (void) memcpy(s,r,(size_t) (length*cache_info->metacontent_extent));
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Delegate to the nexus-based accessor using this thread's private nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetVirtualMetacontent() after invoking GetVirtualPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region: route through a cache-method override
    when one is installed, otherwise use this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call to
% QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the staging pixels of this thread's cache nexus from the most
    recent request.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Hand back the pixel staging area of the given cache nexus; a cache whose
    storage class is still undefined holds no pixels.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((Quantum *) NULL);
  return((const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Composite pixel p over q weighted by the alpha/beta mask values; a fully
    transparent mask leaves q untouched.
  */
  if (fabs((double) (alpha-TransparentAlpha)) < MagickEpsilon)
    return(q);
  gamma=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  Quantum
    *magick_restrict p,
    *magick_restrict q;
  ssize_t
    y;
  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do when the image carries no composite mask channel or the
     nexus region is empty. */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: authentic pixels covering the nexus region; q: the nexus staging
     pixels that receive the masked result. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;
    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        alpha;
      ssize_t
        i;
      /* Mask weight for this pixel, taken from the composite-mask channel. */
      alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Only channels flagged for update are composited. */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(q[i],alpha,p[i],GetPixelAlpha(image,p));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      {
        int
          access_flags;

        /*
          Writable modes: attempt an exclusive create first, then fall back
          to opening the existing cache file.
        */
        access_flags=(mode == WriteMode) ? O_WRONLY : O_RDWR;
        file=open_utf8(cache_info->cache_filename,access_flags | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,access_flags | O_BINARY,
            S_MODE);
      }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length' bytes from `buffer' to the cache file at `offset', retrying
  on short writes and EINTR.  Returns the number of bytes written (which is
  less than `length' on a non-recoverable error), or -1 if the initial seek
  fails.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* No pwrite(): position the file descriptor once, then use write(). */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry only when interrupted by a signal; any other error (or
           end-of-device) terminates the loop with a short count. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  Ensure the on-disk pixel cache file is at least `length' bytes long,
  extending it by writing a single byte at length-1 when necessary.  Returns
  MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    count,
    extent,
    offset;
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];
      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not fit in a signed file offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      /* Writing one byte at length-1 extends the file (sparsely on most
         filesystems). */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* When synchronize is requested, reserve the blocks eagerly so later
         writes cannot fail with ENOSPC. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
source_info;
char
format[MagickPathExtent],
message[MagickPathExtent];
const char
*hosts,
*type;
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (cache_anonymous_memory < 0)
{
char
*value;
/*
Does the security policy require anonymous mapping for pixel cache?
*/
cache_anonymous_memory=0;
value=GetPolicyValue("pixel-cache-memory");
if (value == (char *) NULL)
value=GetPolicyValue("cache:memory-map");
if (LocaleCompare(value,"anonymous") == 0)
{
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
cache_anonymous_memory=1;
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
"'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
}
value=DestroyString(value);
}
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (((MagickSizeType) image->columns > cache_info->width_limit) ||
((MagickSizeType) image->rows > cache_info->height_limit))
ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
image->filename);
if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
{
length=GetImageListLength(image);
if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
image->filename);
}
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
image->filename,(double) image->scene);
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->alpha_trait=image->alpha_trait;
cache_info->channels=image->channels;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
InitializePixelChannelMap(image);
cache_info->number_channels=GetPixelChannels(image);
(void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
sizeof(*image->channel_map));
cache_info->metacontent_extent=image->metacontent_extent;
cache_info->mode=mode;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
if (image->metacontent_extent != 0)
packet_size+=cache_info->metacontent_extent;
length=number_pixels*packet_size;
columns=(size_t) (length/cache_info->rows/packet_size);
if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
((ssize_t) cache_info->rows < 0))
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
if (image->ping != MagickFalse)
{
cache_info->type=PingCache;
return(MagickTrue);
}
status=AcquireMagickResource(AreaResource,(MagickSizeType)
cache_info->columns*cache_info->rows);
if (cache_info->mode == PersistMode)
status=MagickFalse;
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if ((status != MagickFalse) &&
(length == (MagickSizeType) ((size_t) length)) &&
((cache_info->type == UndefinedCache) ||
(cache_info->type == MemoryCache)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (status != MagickFalse)
{
status=MagickTrue;
if (cache_anonymous_memory <= 0)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(Quantum *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
}
else
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
}
else
{
/*
Create memory pixel cache.
*/
cache_info->type=MemoryCache;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->mapped != MagickFalse ?
"Anonymous" : "Heap",type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
cache_info->storage_class=image->storage_class;
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
status=AcquireMagickResource(DiskResource,cache_info->length);
hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
exception);
if ((status == MagickFalse) && (hosts != (const char *) NULL))
{
DistributeCacheInfo
*server_info;
/*
Distribute the pixel cache to a remote server.
*/
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
status=MagickTrue;
cache_info->type=DistributedCache;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
GetDistributeCacheFile((DistributeCacheInfo *)
cache_info->server_info),type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/*
Create pixel cache on disk.
*/
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
(cache_info->mode != PersistMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
cache_info->type=DiskCache;
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if (length == (MagickSizeType) ((size_t) length))
{
status=AcquireMagickResource(MapResource,cache_info->length);
if (status != MagickFalse)
{
cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
status=MagickTrue;
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; a value of zero clones the current cache to disk and
%      initializes the persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;
  MagickBooleanType
    status;
  ssize_t
    page_size;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* sync any device-side pixel buffer back before persisting */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: point this image's cache at
        the on-disk file at *offset (MapCache type) and open it in ReadMode.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance *offset past this cache, padded to a page boundary */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Build a disk-backed clone mirroring the current cache's geometry and
    channel layout, then copy the pixel repository into it at *offset.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);  /* not yet opened */
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* advance *offset past this cache, padded to a page boundary */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    buffered;
  MagickOffsetType
    last_offset;
  MagickSizeType
    cache_extent;
  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((x < 0) || (y < 0) || (cache_info->columns == 0) ||
      (cache_info->rows == 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /*
    Reject regions whose first or last pixel falls outside the cache.
  */
  last_offset=(MagickOffsetType) y*cache_info->columns+x;
  if (last_offset < 0)
    return((Quantum *) NULL);
  last_offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  cache_extent=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((MagickSizeType) last_offset >= cache_extent)
    return((Quantum *) NULL);
  /*
    Return pixel cache; buffer the nexus when a mask channel is active.
  */
  buffered=(((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0)) ? MagickTrue :
    MagickFalse;
  return(SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    buffered,nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Queue the region through this thread's private cache nexus.
  */
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  QueueAuthenticPixelsHandler
    queue_handler;
  const int
    id = GetOpenMPThreadId();
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a registered queue handler when one is installed.
  */
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  /*
    Otherwise queue through this thread's private cache nexus.
  */
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads up to `length' bytes from the pixel cache file
  at `offset' into `buffer', restarting after EINTR and accumulating short
  reads.  It returns the number of bytes actually read (less than `length'
  on error or end-of-file), or -1 when the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /*
          BUG FIX: a return of 0 is end-of-file and does not set errno; the
          previous test (errno != EINTR) could spin forever at EOF when a
          stale EINTR was left in errno by an earlier call.  Stop on EOF or
          any error other than an interrupted call; retry only on EINTR.
        */
        if ((count == 0) || (errno != EINTR))
          break;
        count=0;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  ssize_t
    y;
  unsigned char
    *magick_restrict q;
  size_t
    rows;
  /*
    Copy the metacontent of nexus_info's region from the cache backing store
    (memory, memory map, disk, or distributed server) into the nexus buffer.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);  /* cache carries no metacontent */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly: nothing to copy */
  /*
    offset: first metacontent cell of the region; length: bytes per region
    row; extent: bytes for the whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;
      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy it as one contiguous run */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* full-width region small enough for a single transfer */
          length=extent;
          rows=1UL;
        }
      /*
        Metacontent is stored after all pixel data in the file; extent is
        reused here as the total pixel count of the whole cache.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read: reported after the switch */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per server request */
      else
        {
          /* whole region fits in a single request */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short transfer: reported after the switch */
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a row transfer above failed */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  Quantum
    *magick_restrict q;
  ssize_t
    y;
  size_t
    number_channels,
    rows;
  /*
    Copy the pixels of nexus_info's region from the cache backing store
    (memory, memory map, disk, or distributed server) into the nexus's
    pixel buffer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly: nothing to copy */
  /*
    Compute the region's starting pixel offset and per-row byte length,
    guarding each multiplication against arithmetic overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);  /* y*columns overflowed */
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);  /* row-length computation overflowed */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* empty region or extent overflowed */
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;
      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy it as one contiguous run */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* full-width region small enough for a single transfer */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read: reported after the switch */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per server request */
      else
        {
          /* whole region fits in a single request */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short transfer: reported after the switch */
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a row transfer above failed */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
/*
  ReferencePixelCache() increments the cache reference count under the
  cache semaphore and returns the cache.
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;
  /* consistency fix: compare against (Cache) NULL, not (Cache *) NULL,
     matching every other Cache assertion in this file */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;
  /*
    Re-derive the cache's channel count from the image's pixel channels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Reset the file-scope anonymous-memory selector to its default of 0. */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the file-scope pixel cache epoch counter to 0. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  SetPixelCacheMethods() installs the supplied cache methods on the pixel
  cache.  Only non-NULL entries in cache_methods replace the corresponding
  installed handler; NULL entries leave the current handler untouched.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;
  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;
  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;
  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    BUG FIX: this guard previously tested the *installed* handler
    (cache_info->methods.get_one_virtual_pixel_from_handler) instead of the
    caller-supplied one, unlike every other handler above and the authentic
    twin below.  A NULL entry in cache_methods could then clobber a valid
    handler, while a non-NULL entry could be silently ignored.  Test the
    caller's entry, consistent with the rest of this function.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Allocate the staging buffer for a cache nexus: either anonymous-mapped
  (when the cache_anonymous_memory policy is positive) or heap-allocated
  and zeroed.  Returns MagickFalse and raises an exception on failure.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  size_t
    extent;
  /*
    Fail when the requested length cannot be represented as a size_t.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  extent=(size_t) length;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /* policy prefers anonymous memory: map it */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,extent);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /* default: aligned heap allocation, zero-initialized */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        extent));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,extent);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
/*
  Hint the hardware prefetcher one cache line into the nexus pixels
  (read-intent for ReadMode, write-intent otherwise); regions smaller than
  a cache line are skipped.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode != ReadMode)
    {
      /* prefetch arguments kept as literal constants */
      MagickCachePrefetch(address,1,1);
      return;
    }
  MagickCachePrefetch(address,0,1);
}
/*
  Return MagickTrue when displacing `x' by `a' cannot overflow ssize_t
  arithmetic in either direction.
*/
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  const ssize_t
    delta = (ssize_t) a;
  if (x >= 0)
    {
      /* non-negative offsets need headroom below MAGICK_SSIZE_MAX */
      if (x >= ((ssize_t) MAGICK_SSIZE_MAX-delta))
        return(MagickFalse);
    }
  /* every offset must stay clear of MAGICK_SSIZE_MIN after displacement */
  if (x <= ((ssize_t) MAGICK_SSIZE_MIN+delta))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  SetPixelCacheNexusPixels() points the nexus at the requested region: either
  directly into the in-memory pixel cache (when the region is contiguous and
  fully inside it) or into a private staging buffer that is later synced back.
  Returns the pixel pointer, or NULL with an exception on error.
*/
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  /* An empty region defines no pixels. */
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /* Enforce configured dimension limits and guard the later x/y offset
     arithmetic against ssize_t overflow. */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /* Direct access is possible when the region sits entirely inside the
         cache AND is contiguous in memory: either full-width rows starting
         at x == 0, or a single row fully inside one scanline. */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  /* Reuse the existing staging buffer when large enough; otherwise release
     and (re)allocate it. */
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /* Metacontent, when present, lives immediately after the pixel data in
     the staging buffer. */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetCacheAlphaChannel() enables the alpha trait on the image and writes
  `alpha` into the alpha channel of every pixel, row by row (parallelized
  with OpenMP when available).  Returns MagickTrue on success.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* Cannot `break` out of an OpenMP loop; once status goes false the
       remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    /* NOTE(review): this overwrites status, so an earlier row's failure can
       be masked by a later successful sync — pre-existing behavior. */
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Remember the previous method; it is the return value. */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Representing the background color may require adding an alpha
           channel and/or leaving a gray colorspace for sRGB. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Transparent virtual pixels require the image to carry alpha. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* Only memory caches with an attached OpenCL buffer need syncing. */
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /* Flush any pending OpenCL work for this image's cache to host memory. */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply write/composite masks unless the mask itself is being updated. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* A nexus pointing directly into the cache needs no copy-back; just mark
     the image as modified. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /* Write pixels first, then the metacontent plane if one exists. */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /* Sync this thread's private nexus back to the pixel cache. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered sync handler when one is installed. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() with a true clone flag performs the sync; success
     is signalled by a non-NULL cache. */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() copies the nexus's metacontent rows into the
  backing store (memory, disk, or distributed cache).  Returns MagickTrue on
  success.  Fix: the distributed-cache call passed the HTML-mangled token
  `®ion` instead of `&region`, which does not compile.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Direct nexuses already point into the cache; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      /* Full-width regions are contiguous: collapse to one long row. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent is stored after all pixel data; `extent` is
         repurposed here as the total pixel count of the cache. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* The row loop breaks early on a short write; y < rows means failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() copies the nexus's staged pixel rows into the
  backing store (memory, disk, or distributed cache).  Returns MagickTrue on
  success.  Fix: the distributed-cache call passed the HTML-mangled token
  `®ion` instead of `&region`, which does not compile.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /* Direct nexuses already point into the cache; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      /* Full-width regions are contiguous: collapse to one long row. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* The row loop breaks early on a short write; y < rows means failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
cryptorand.c | /**
* Copyright (c) 2016, Kevin Lewi
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "cryptorand.h"
/* Initialize `state` from the compile-time DEFAULT_SEED, with no extra entropy. */
void cryptorand_init(cryptorand_t state) {
  cryptorand_initseed(state, DEFAULT_SEED, NULL);
}
/*
  Seed the RNG state: zero the AES IV, reset the counter, and derive the
  AES key as SHA-256(seed || additional).  `additional` may be NULL.
  Errors are reported via perror and leave the state partially initialized.
*/
void cryptorand_initseed(cryptorand_t state, char *seed, char *additional) {
  state->cryptorand_init = 1;
  state->ctr = 0;
  size_t iv_len = EVP_CIPHER_iv_length(AES_ALGORITHM);
  state->iv = malloc(iv_len);
  if(state->iv == NULL) {
    perror("Error setting IV for state");
    return;
  }
  memset(state->iv, 0, iv_len);
  if(additional == NULL) {
    additional = "";
  }
  /* Concatenate seed and additional into one buffer to hash. */
  size_t buflen = strlen(seed) + strlen(additional) + 1;
  char *digest = malloc(buflen);
  if(digest == NULL) {
    perror("Error setting SHA digest");
    return;
  }
  snprintf(digest, buflen, "%s%s", seed, additional);
  SHA256_CTX sha256;
  if(!SHA256_Init(&sha256)) {
    perror("Error in calling SHA256_Init");
  }
  else if(!SHA256_Update(&sha256, digest, strlen(digest))) {
    perror("Error in calling SHA256_Update");
  }
  else if(!SHA256_Final(state->key, &sha256)) {
    perror("Error in calling SHA256_Final");
  }
  free(digest);
}
/* Release the IV buffer; the caller owns the state struct itself. */
void cryptorand_clear(cryptorand_t state) {
  free(state->iv);
}
/* Draw a uniformly random fmpz in [0, m) using the crypto RNG state. */
void fmpz_randm_crypto(fmpz_t f, cryptorand_t state, const fmpz_t m) {
  mpz_t bound, sample;
  mpz_init(bound);
  mpz_init(sample);
  fmpz_get_mpz(bound, m);
  mpz_urandomm_crypto(sample, state, bound);
  fmpz_set_mpz(f, sample);
  mpz_clear(sample);
  mpz_clear(bound);
}
/*
  Rejection-sample a uniform value in [0, m): repeatedly draw numbers of
  size-of-m bits until one falls below m.
*/
void mpz_urandomm_crypto(mpz_t rop, cryptorand_t state, const mpz_t m) {
  unsigned long bits = mpz_sizeinbase(m, 2);
  do {
    mpz_urandomb_crypto(rop, state, bits);
  } while(mpz_cmp(rop, m) >= 0);
}
/*
  Generate a uniformly random n-bit number into rop by running AES in
  counter-IV mode over a zero input block and parsing the keystream with
  mpz_inp_raw (4-byte big-endian length header + payload).
  Fixes: (1) the cipher context leaked on the EVP_EncryptInit_ex-failure and
  output-malloc-failure paths; (2) the zero input block was a VLA sized by
  n/8+1, risking stack overflow for large n — it is now heap-allocated.
*/
void mpz_urandomb_crypto(mpz_t rop, cryptorand_t state, mp_bitcnt_t n) {
  unsigned long ctr_iv;
  #pragma omp critical(update_iv_counter)
  {
    // update the internal counter, works at most 2^64 times
    ctr_iv = state->ctr++;
  }
  /* The counter doubles as (part of) the IV so each draw uses a fresh stream. */
  memcpy(state->iv, &ctr_iv, sizeof(ctr_iv));
  mp_bitcnt_t nb = n/8+1; // number of bytes
  if(!(state->ctx = EVP_CIPHER_CTX_new())) {
    perror("Error in initializing new cipher context");
    return;
  }
  if(!EVP_EncryptInit_ex(state->ctx, AES_ALGORITHM, NULL, state->key,
      state->iv)) {
    perror("Error in calling EncryptInit");
    goto ctx_exit;  /* was: returned and leaked the cipher context */
  }
  unsigned char *output;
  if(!(output = malloc(2 * (nb + EVP_MAX_IV_LENGTH)))) {
    perror("Error in initializing output buffer");
    goto ctx_exit;  /* was: returned and leaked the cipher context */
  }
  mp_bitcnt_t outlen = 0;
  int in_size = nb;
  /* Zero input block; heap-allocated instead of the original VLA. */
  unsigned char *in = calloc(1, in_size);
  if(in == NULL) {
    perror("Error in initializing input buffer");
    goto output_exit;
  }
  while(outlen < nb) {
    int buflen = 0;
    if(!EVP_EncryptUpdate(state->ctx, output+outlen, &buflen, in, in_size)) {
      perror("Error in calling EncryptUpdate");
      goto in_exit;
    }
    outlen += buflen;
  }
  int final_len = 0;
  if(!EVP_EncryptFinal(state->ctx, output+outlen, &final_len)) {
    perror("Error in calling EncryptFinal");
    goto in_exit;
  }
  outlen += final_len;
  if(outlen > nb) {
    outlen = nb; // we will only use nb bytes
  }
  /* Build an mpz_inp_raw image: 4-byte big-endian byte count, then data. */
  mp_bitcnt_t true_len = outlen + 4;
  mp_bitcnt_t bytelen = outlen;
  unsigned char *buf;
  if(!(buf = malloc(true_len))) {
    perror("Error in initializing buf");
    goto in_exit;
  }
  memset(buf, 0, true_len);
  memcpy(buf+4, output, outlen);
  /* Trim the leading byte so exactly n bits remain. */
  buf[4] >>= ((outlen*8) - (unsigned int) n);
  for(int i = 3; i >= 0; i--) {
    buf[i] = (unsigned char) (bytelen % (1 << 8));
    bytelen /= (1 << 8);
  }
  // generate a random n-bit number
  FILE *fp;
  if(!(fp = fmemopen(buf, true_len, "rb"))) {
    perror("Error in calling fmemopen");
    goto buf_exit;
  }
  if(!mpz_inp_raw(rop, fp)) {
    fprintf(stderr, "Error in parsing randomness.\n");
  }
  fclose(fp);
buf_exit:
  free(buf);
in_exit:
  free(in);
output_exit:
  free(output);
ctx_exit:
  EVP_CIPHER_CTX_cleanup(state->ctx);
  free(state->ctx);
}
|
DRACC_OMP_056_Counter_working_critical_no.c | /*
Counter incrementation with working critical sections. Intra Region.
*/
#include <stdio.h>
#define N 100000

int countervar = 0;

/*
  Offloaded counting loop: each iteration adds 1 and then subtracts 2 inside
  separate critical sections, so countervar ends at -N.  Returns 0.
*/
int count(){
  #pragma omp target map(tofrom:countervar) device(0)
  #pragma omp teams num_teams(1)
  #pragma omp distribute parallel for
  for (int iter=0; iter<N; iter++){
    #pragma omp critical
    countervar++;
    #pragma omp critical
    countervar -= 2;
  }
  return 0;
}
/* Driver: run the offloaded counter loop and report the final value (-N). */
int main(){
  count();
  printf("counter: %i expected: -100000\n ",countervar);
  return 0;
} |
blocking-omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include "mpi.h"
struct timeval startwtime, endwtime1, endwtime2;
double seq_time1, seq_time2;
double **minDist;
double **minLabels;
double* packer(double **received, int blocksize, int LINESIZE);
double** unpacker(double *toReceive,int blocksize, int LINESIZE);
void knnSearch(int rank, int l, double **local, double **received, int blocksize,int LINESIZE, int nbrs);
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE);
double Ndistance(double *pointA, double *pointB, int LINESIZE);
void bubbleSort(int i,int nbrs);
void swap(int i, int k);
int main(int argc, char** argv){
char filename[100];
int MAX = 0;
int LINESIZE = 0;
int nbrs = 0;
int i,j,l;
int blocksize = 0;
double **local;
double **received;
double *toSend;
double *toReceive;
int rank;
int p; //==number of procs
MPI_File fp;
MPI_File fpResults;
MPI_Status status;
MPI_Offset offset;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
if(argc!=5){
printf("ERROR: usage:\n%s filename MAX nbrs threads\n", argv[0]);
printf("filename is the name of the .bin file to take data from\n");
printf("MAX is the number of elements to take part in the search\n");
printf("nbrs is the number of the nearest neighbours search for each point\n");
printf("threads is the number of threads to be used for OpenMP\n");
exit(1);
}
MAX = atoi(argv[2]);
nbrs = atoi(argv[3]);
blocksize = MAX/p;
omp_set_num_threads(atoi(argv[4]));
strcpy(filename, argv[1]);
if(!strcmp(filename,"trainX_svd")){
LINESIZE = 30;
}
else{
LINESIZE = 784;
}
//creating blocks
local = (double **) malloc(blocksize*sizeof(double*));
received = (double **) malloc(blocksize*sizeof(double*));
toSend = (double *) malloc(blocksize*LINESIZE*sizeof(double));
toReceive = (double *) malloc(blocksize*LINESIZE*sizeof(double));
for(i=0; i<blocksize; i++){
local[i] = (double *) malloc(LINESIZE*sizeof(double));
received[i] = (double *) malloc(LINESIZE*sizeof(double));
}
//initialising results array
minDist = (double **) malloc(MAX*sizeof(double*));
minLabels = (double **) malloc(MAX*sizeof(double*));
for(i=0; i<MAX; i++){
minDist[i] = (double *) malloc(nbrs*sizeof(double));
minLabels[i] = (double *) malloc(nbrs*sizeof(double));
for(j=0; j<nbrs; j++){
//presetting minDist to sth very big
minDist[i][j] = 1000;
minLabels[i][j] = -1;
}
}
strcat(filename, ".bin");
if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fp)){
printf("Error reading file from Process %d (fp)\n", rank);
exit(1);
}
else{
//block reading
if(rank==0){
printf("Initialising kNN search for problem size %d and k = %d\nusing archive %s\n", MAX, nbrs, filename);
}
for(i=0; i<blocksize; i++){
for(j=0; j<LINESIZE ; j++){
offset = rank*blocksize*LINESIZE*sizeof(double)+i*LINESIZE*sizeof(double)+j*sizeof(double);
MPI_File_read_at(fp, offset, &local[i][j], 1, MPI_DOUBLE, &status);
}
}
//blockprint(local, blocksize, rank);
MPI_Barrier(MPI_COMM_WORLD);
MPI_File_close(&fp);
knnSearch(rank, rank, local, local, blocksize,LINESIZE, nbrs);
}
//2d array to 1d array:
toSend = packer(local, blocksize, LINESIZE);
if(rank==0) gettimeofday (&startwtime, NULL);
//circulation of blocks
for(l=0; l<(p-1); l++){
if(rank!=0){
MPI_Recv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, rank-1, 10, MPI_COMM_WORLD, &status);
}
if(rank!=(p-1)){
MPI_Ssend(toSend, blocksize*LINESIZE, MPI_DOUBLE, rank+1, 10, MPI_COMM_WORLD);
}
else{
MPI_Ssend(toSend, blocksize*LINESIZE, MPI_DOUBLE, 0, 10, MPI_COMM_WORLD);
}
if(rank==0){
MPI_Recv(toReceive, blocksize*LINESIZE, MPI_DOUBLE, p-1, 10, MPI_COMM_WORLD, &status);
}
//1d to 2d array
received = unpacker(toReceive, blocksize, LINESIZE);
int tmp = status.MPI_SOURCE;
for(int t=0; t<l ;t++){
tmp--;
if(tmp<0) tmp = p-1;
}
//time for blocking COMMS
//only on the last rep will count
//rank 0 recieves last
if((rank==0) && (l==(p-2)) ) gettimeofday (&endwtime1, NULL);
knnSearch(rank, tmp, local, received, blocksize, LINESIZE, nbrs);
toSend = packer(received, blocksize, LINESIZE);
}
//preparing to send results to proc 0
toSend = (double *) realloc(toSend, blocksize*nbrs*sizeof(double));
toReceive = (double *) realloc(toReceive, blocksize*nbrs*sizeof(double));
if(rank==0){
for(i=1; i<p; i++){
MPI_Recv(toSend, blocksize*nbrs, MPI_DOUBLE, i, 15, MPI_COMM_WORLD, &status);
received = unpacker(toSend, blocksize, nbrs);
for(j=0; j<blocksize; j++){
for(int k=0; k<nbrs; k++){
minDist[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
}
}
MPI_Recv(toReceive, blocksize*nbrs, MPI_DOUBLE, i, 20, MPI_COMM_WORLD, &status);
received = unpacker(toReceive, blocksize, nbrs);
for(j=0; j<blocksize; j++){
for(int k=0; k<nbrs; k++){
minLabels[status.MPI_SOURCE*blocksize+j][k] = received[j][k];
}
}
}
}
else{
//toSend buffer used for minDist
//toReceive buffer used for minLabels
for(i=0; i<blocksize; i++){
for(j=0; j<nbrs; j++){
toSend[i*nbrs+j] = minDist[rank*blocksize+i][j];
toReceive[i*nbrs+j] = minLabels[rank*blocksize+i][j];
}
}
MPI_Send(toSend, blocksize*nbrs, MPI_DOUBLE, 0, 15, MPI_COMM_WORLD);
MPI_Send(toReceive, blocksize*nbrs, MPI_DOUBLE, 0, 20, MPI_COMM_WORLD);
}
strcpy(filename, argv[1]);
if(!strcmp(filename,"trainX_svd")){
strcpy(filename, "results-mpi-blocking-svd.txt");
}
else{
strcpy(filename, "results-mpi-blocking.txt");
}
if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
printf("Error opening file from Process %d (fp)\n", rank);
exit(1);
}
if(rank==0){
gettimeofday (&endwtime2, NULL);
}
/*
strcpy(filename, argv[1]);
if(!strcmp(filename,"trainX_svd")){
strcpy(filename, "results-mpi-blocking-labels-svd.txt");
}
else{
strcpy(filename, "results-mpi-blocking-labels.txt");
}
if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp)){
printf("Error opening file from Process %d (fp)\n", rank);
exit(1);
}
strcpy(filename, argv[1]);
if(!strcmp(filename,"trainX_svd")){
strcpy(filename, "results-mpi-blocking-dist-svd.txt");
}
else{
strcpy(filename, "results-mpi-blocking-dist.txt");
}
if(MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fpResults)){
printf("Error opening file from Process %d (fp)\n", rank);
exit(1);
}
//printing results in two seperate files
for(i=0; i<blocksize; i++){
char buf[100];
for(j=0; j<nbrs; j++){
if(minLabels[rank*blocksize+i][j]==-1) printf("ERROR\n");
//offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
offset = rank*blocksize*LINESIZE*sizeof(char)+i*LINESIZE*sizeof(char)+j*sizeof(char);
sprintf(buf, "%f ", minLabels[rank*blocksize+i][j]);
MPI_File_write_at(fp, offset, buf, strlen(buf), MPI_CHAR, &status);
sprintf(buf, "%f ", minDist[rank*blocksize+i][j]);
MPI_File_write_at(fpResults, offset, buf, strlen(buf), MPI_CHAR, &status);
//printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
}
}
*/
if(rank==0){
//printing results in a sigle file in easily readable form from proc 0 ONLY
for(i=0; i<p*blocksize; i++){
char buf[100];
sprintf( buf, "Top %d closest to point %d:\n",nbrs, i);
MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
//printf("Top %d closest to point %d:\n",nbrs, i);
for(j=0; j<nbrs; j++){
if(minLabels[i][j]==-1) printf("ERROR\n");
sprintf(buf, "#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
MPI_File_write(fp, buf, strlen(buf), MPI_CHAR, &status);
//printf("#%d: %d with a distance of %f\n", j+1, (int) minLabels[i][j], minDist[i][j]);
}
}
}
if(rank==0){
seq_time1 = (double)((endwtime1.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime1.tv_sec - startwtime.tv_sec);
printf("COMMS Wall clock time = %f\n", seq_time1);
seq_time2 = (double)((endwtime2.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime2.tv_sec - startwtime.tv_sec);
printf("FINAL Wall clock time = %f\n", seq_time2);
printf("\nJob Done.\n");
}
MPI_File_close(&fp);
MPI_File_close(&fpResults);
free(local);
free(received);
free(toSend);
free(toReceive);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return (0);
}
/* Flatten a blocksize x LINESIZE 2-D array into a newly malloc'ed
 * row-major 1-D buffer. Caller owns (and must free) the result. */
double* packer(double **received,int blocksize, int LINESIZE){
    double *flat = (double *) malloc(blocksize*LINESIZE*sizeof(double));
    double *dst = flat;
    for (int row = 0; row < blocksize; row++) {
        for (int col = 0; col < LINESIZE; col++) {
            *dst++ = received[row][col];
        }
    }
    return flat;
}
/* Expand a row-major 1-D buffer back into a blocksize x LINESIZE 2-D array.
 * Returns a malloc'ed array of malloc'ed rows; caller owns the memory.
 *
 * Fix: the row-pointer table was allocated with blocksize*sizeof(double)
 * instead of blocksize*sizeof(double *) — that only worked by accident on
 * platforms where sizeof(double) >= sizeof(double *). Use `sizeof *temp`
 * so the element type can never drift out of sync. */
double** unpacker(double *toReceive,int blocksize, int LINESIZE){
    int i,j;
    double **temp;
    temp = (double **) malloc(blocksize*sizeof *temp);   /* was sizeof(double): wrong element size */
    for(i=0; i<blocksize; i++){
        temp[i] = (double *) malloc(LINESIZE*sizeof *temp[i]);
        for(j=0; j<LINESIZE; j++){
            temp[i][j]= toReceive[i*LINESIZE+j];
        }
    }
    return temp;
}
/* Compare every point of block `local` (owned by `rank`) against every point
 * of block `received` (originating from rank `l`), updating the global
 * top-`nbrs` tables through pointCompare.
 *
 * Fixes vs. the original:
 *  - `j` and `k` were declared outside the `#pragma omp parallel for` and were
 *    therefore shared between threads — a data race on the inner loop counters.
 *  - `pointA`/`pointB` were single heap buffers shared by all threads (another
 *    race) and were never freed (leak). pointCompare only reads the two point
 *    vectors, so we can pass the row pointers directly and drop the copies.
 * Each thread works on distinct `i` values, hence distinct rows
 * rank*blocksize+i of the global result tables, so the update is race-free. */
void knnSearch(int rank, int l, double **local, double **received, int blocksize, int LINESIZE, int nbrs){
    (void)LINESIZE; /* row length is forwarded unchanged below */
    #pragma omp parallel for
    for(int i=0; i<blocksize; i++){
        for(int j=0; j<blocksize; j++){
            /* global ids: row of `local` vs row of `received` */
            pointCompare(rank*blocksize+i, l*blocksize+j, nbrs, local[i], received[j], LINESIZE);
        }
    }
}
/* Insert point `j` into point `i`'s running top-`nbrs` nearest-neighbour list.
 * Reads and writes row `i` of the file-scope tables minDist / minLabels.
 * The label `j` is stored into a double array (minLabels) — callers cast back
 * to int when printing.
 * NOTE(review): bubbleSort is re-run on every call even though the insertion
 * below keeps the row sorted; looks redundant — confirm before removing. */
void pointCompare(long i, long j, int nbrs, double *pointA, double *pointB, int LINESIZE){
double dist=0;
int k,n;
/* Euclidean distance between the two LINESIZE-dimensional points */
dist=Ndistance(pointA, pointB, LINESIZE);
/* keep row i sorted ascending before locating the insertion slot */
bubbleSort(i, nbrs);
for(n=0; n<nbrs ; n++){
/* dist == 0 means pointA == pointB (self match) and is skipped */
if(dist>0 && dist<minDist[i][n]){
/* shift the tail one slot right to make room at position n */
for(k=(nbrs-1); k>n; k--){
minDist[i][k] = minDist[i][k-1];
minLabels[i][k] = minLabels[i][k-1];
}
minDist[i][n] = dist;
minLabels[i][n] = j;
break;
}
}
}
double Ndistance(double *pointA, double *pointB, int LINESIZE){
double dist=0;
for(int k=0; k<LINESIZE; k++){
dist += pow(pointA[k]-pointB[k],2);
}
return sqrt(dist);
}
/* Bubble-sort row i of the global minDist table ascending,
 * moving the matching minLabels entries alongside via swap(). */
void bubbleSort(int i,int nbrs){
    for (int pass = 0; pass < nbrs; pass++) {
        for (int idx = nbrs - 1; idx > pass; idx--) {
            if (minDist[i][idx-1] > minDist[i][idx]) {
                swap(i, idx);
            }
        }
    }
}
/* Exchange entries k-1 and k in row i of both global result tables. */
void swap(int i, int k){
    double d = minDist[i][k];
    minDist[i][k] = minDist[i][k-1];
    minDist[i][k-1] = d;
    double lab = minLabels[i][k];
    minLabels[i][k] = minLabels[i][k-1];
    minLabels[i][k-1] = lab;
}
UnbalancedSliced.h | #pragma once
/*
Copyright (c) 2019 CNRS
Nicolas Bonneel <nicolas.bonneel@liris.cnrs.fr>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <vector>
#include <algorithm>
#include <utility>
#include <chrono>
#include <ctime>
#include <cstring>
#include <cmath>
#include <omp.h>
#include <list>
#include <thread>
#include <random>
#define cimg_display 0
#include "CImg.h"
#include "Point.h"
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <immintrin.h>
#if __linux__
#include <malloc.h>
#endif
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
float cost(float x, float y);
double cost(double x, double y);
__m256 cost(const __m256 &x, const __m256 &y);
__m256d cost(const __m256d &x, const __m256d &y);
double sumCosts(const double* h1, int start1, const double* h2, int start2, int n);
float sumCosts(const float* h1, int start1, const float* h2, int start2, int n);
void * malloc_simd(const size_t size, const size_t alignment);
void free_simd(void* mem);
// Half-open index ranges [start0,end0) into hist1 and [start1,end1) into hist2
// describing one 1-d assignment subproblem.
struct params {
params() {};
// NOTE(review): the trailing parameter `d` is never used; kept for call-site compatibility.
params(int d0, int f0, int d1, int f1, int d) :start0(d0), end0(f0), start1(d1), end1(f1) {};
int start0, end0, start1, end1;
};
#ifdef __APPLE__
static std::default_random_engine engine(10); // 10 = random seed
static std::uniform_real_distribution<double> uniform(0, 1);
#else
static thread_local std::default_random_engine engine(10); // 10 = random seed
static thread_local std::uniform_real_distribution<double> uniform(0, 1);
#endif
// Draw two independent standard-normal samples via the Box-Muller transform,
// consuming two uniforms from the file-scope `engine`/`uniform` pair.
template<typename T>
Point<2, T> BoxMuller() {
	double u1 = uniform(engine);
	double u2 = uniform(engine);
	Point<2, T> out;
	// clamp u1 away from {0,1} so the log stays finite
	double radius = sqrt(-2 * log(std::max(1E-12, std::min(1. - 1E-12, u1))));
	out[0] = radius * cos(2 * M_PI*u2);
	out[1] = radius * sin(2 * M_PI*u2);
	return out;
}
// Projects DIM-dimensional points onto a fixed direction (plain dot product).
template<int DIM, typename T>
struct Projector {
	Projector(const Point<DIM, T> &dir) : dir(dir) {};
	double proj(const Point<DIM, T> &p) {
		double acc = 0;
		for (int d = 0; d < DIM; d++)
			acc += p[d] * dir[d];
		return acc;
	}
	Point<DIM, T> dir;
};
class UnbalancedSliced {
public:
// nearest neighbors in 1d
// For each hist1[i] in [p.start0,p.end0), find the index of its nearest
// neighbour in sorted hist2 over [p.start1,p.end1) and store it in
// assignment[i]. `cursor` carries the previous match forward so the scan is
// amortized linear over sorted, monotone input.
// NOTE(review): when mind < d <= mind+epsilon neither branch fires and the
// scan continues; also `cursor = j - 1` backs up one slot each iteration —
// both look deliberate (epsilon tolerance / monotone restart) but verify.
template<typename T>
void nearest_neighbor_match(const T *hist1, const T* hist2, const params &p, std::vector<int> &assignment) {
int cursor = p.start1;
for (int i = p.start0; i < p.end0; i++) {
T mind = std::numeric_limits<T>::max();
int minj = -1;
for (int j = std::max(p.start1, cursor); j < p.end1; j++) {
T d = cost(hist1[i], hist2[j]);
cursor = j - 1;
if (d <= mind) {
mind = d;
minj = j;
} else
if (d > mind+std::numeric_limits<T>::epsilon()) {
break; // costs are increasing past the minimum: stop scanning
}
}
assignment[i] = minj;
}
}
// handles the case where the first sequence starts before or ends after the second sequence, or where the NN of the first (resp. last) elements of hist1 are the first (resp. last) elements of hist2
// also restricts problem size based on the number of non-injective values, but that won't be super useful
// returns 1 if hist1 entirely consumed ; 0 otherwise
// Shrink subproblem `inparam` by consuming matches that are forced at both
// ends of hist1/hist2, accumulating their cost into `emd` (shared, hence the
// omp atomics). `assNN` holds each hist1 element's nearest neighbour in
// hist2; `nbbij` is the number of non-injective NN matches in the interval.
// Returns 1 if hist1 was entirely consumed, 0 otherwise.
template<typename T>
int reduce_range(const T *hist1, const T* hist2, std::vector<int> &assignment, params &inparam, T& emd, int* assNN, int nbbij) {
params p0 = inparam; // NOTE(review): p0 is never read afterwards
/// hist1 (partly) at the left of hist2 : can match the outside of hist1 to the begining of hist2
int cursor1 = inparam.start1;
int min0 = inparam.start0;
T localchange = 0;
for (int i = inparam.start0; i < inparam.end0; i++) {
if (hist1[i] <= hist2[cursor1]) {
assignment[i] = cursor1;
localchange += cost(hist1[i], hist2[cursor1]);
cursor1++;
min0 = i + 1;
} else break;
}
inparam.start0 = min0;
inparam.start1 = cursor1;
if (inparam.end0 == inparam.start0) {
// whole of hist1 consumed by the left pass
#pragma omp atomic
emd += localchange;
return 1;
}
/// hist1 (partly) at the right of hist2 : can match the outside of hist1 to the end of hist2
int cursor1b = inparam.end1 - 1;
int max0 = inparam.end0 - 1;
for (int i = inparam.end0 - 1; i >= inparam.start0; i--) {
if (hist1[i] >= hist2[cursor1b]) {
assignment[i] = cursor1b;
localchange += cost(hist1[i], hist2[cursor1b]);
cursor1b--;
max0 = i - 1;
} else break;
}
inparam.end0 = max0 + 1;
inparam.end1 = cursor1b + 1;
if (inparam.end0 == inparam.start0) {
#pragma omp atomic
emd += localchange;
return 1;
}
// non-injective matches not super useful anymore.
// We don't need to match to the entire range of hist2 : only the range of the nearest neighbors of the bounds of hist1, +/- the number of non-injective values
int M = inparam.end0 - inparam.start0; // NOTE(review): M is unused below
inparam.start1 = std::max(inparam.start1, assNN[inparam.start0] - nbbij);
inparam.end1 = std::min(inparam.end1, assNN[inparam.end0 - 1] + nbbij + 1); // +1 since bound is excluded
//if the NN of the begining of hist1 are matched to the first values of hist2, then they should be matched
int cursor = inparam.start1;
int i;
for (i = inparam.start0; i < inparam.end0; i++) {
if (assNN[i] == cursor && (i == inparam.end0 - 1 || assNN[i + 1] != assNN[i])) {
assignment[i] = cursor;
localchange += cost(hist1[i], hist2[assignment[i]]);
cursor++;
} else break;
}
inparam.start0 = i;
inparam.start1 = cursor;
if (inparam.start0 == inparam.end0) {
#pragma omp atomic
emd += localchange;
return 1;
}
int prevfin = inparam.end0; // NOTE(review): prevfin/prevend1 are unused below
int prevend1 = inparam.end1;
//if the NN of the end of hist1 are matched to the last values of hist2, then they should be matched
cursor = inparam.end1 - 1;
for (i = inparam.end0 - 1; i >= inparam.start0; i--) {
if (assNN[i] == cursor && (i == inparam.start0 || assNN[i - 1] != assNN[i])) {
assignment[i] = cursor;
localchange += cost(hist1[i], hist2[assignment[i]]);
cursor--;
} else break;
}
inparam.end0 = i + 1;
inparam.end1 = cursor + 1;
if (inparam.start0 == inparam.end0) {
#pragma omp atomic
emd += localchange;
return 1;
}
#pragma omp atomic
emd += localchange;
return 0;
}
// handles trivial cases: M==N, M==N-1, M==1, or nearest neighbor map is injective
// return 1 = subproblem solved
// return 0 = problem not solved
// Solve a subproblem directly when it has a closed form:
//   M == 0, M == N (identity match), M == N-1 (one gap to place),
//   M == 1 (take the NN), or the NN map is injective.
// On success the assignment is written and the cost is added to `value`
// (omp atomic, since several subproblems run in parallel). Returns 1 when
// solved here, 0 when the caller must keep working on it.
template<typename T>
int handle_simple_cases(const params &p, const T* hist1, const T* hist2, int* assignment, int* assNN, T &value) {
int start0 = p.start0;
int start1 = p.start1;
int end0 = p.end0;
int end1 = p.end1;
int M = end0 - start0;
int N = end1 - start1;
if (M == 0) return 1;
if (M == N) {
// same sizes: the order-preserving identity match is optimal
T d = 0;
for (int i = 0; i < M; i++) {
assignment[start0 + i] = i + start1;
d += cost(hist1[start0 + i], hist2[start1 + i]);
}
#pragma omp atomic
value += d;
return 1;
}
if (M == N - 1) {
// one extra hist2 slot: pick the best position for the single gap
T d1 = 0;
T d2 = 0;
for (int i = 0; i < M; i++) {
d2 += cost(hist1[start0 + i], hist2[start1 + i + 1]);
}
T d = 0;
T b = d2;
T best_s = d2; // this is actually optional: this is a constant.
int besti = -1;
for (int i = 0; i < M; i++) {
d1 += cost(hist1[start0 + i], hist2[start1 + i]); // forward cost
b -= cost(hist1[start0 + i], hist2[start1 + i + 1]); // backward cost
T s = b + d1;
if (s < best_s) {
best_s = s;
besti = i;
}
}
for (int i = 0; i < M; i++) {
if (i <= besti) {
assignment[start0 + i] = i + start1;
} else {
assignment[start0 + i] = i + 1 + start1;
}
}
#pragma omp atomic
value += best_s;
return 1;
}
if (M == 1) {
// single element: its nearest neighbour is the optimum
assignment[start0] = assNN[start0];
T c = cost(hist1[start0], hist2[assNN[start0]]);
#pragma omp atomic
value += c;
return 1;
}
// checks if NN is injective
{
int curId = 0;
T sumMin = 0;
bool valid = true;
for (int i = 0; i < M; i++) {
int ass; // NOTE(review): only set when a minimum is found; guarded by the `mini == max` check below
T h1 = hist1[start0 + i];
T mini = std::numeric_limits<T>::max();
for (int j = curId; j < N; j++) {
T v = cost(h1, hist2[start1 + j]);
curId = j;
if (v < mini) {
mini = v;
ass = j + start1;
}
if (j < N - 1) {
T vnext = cost(h1, hist2[start1 + j + 1]);
if (vnext > v) break; // costs increasing: passed the minimum
}
}
if (mini == std::numeric_limits<T>::max()) {
valid = false;
break;
}
if (i > 0 && ass == assignment[start0 + i - 1]) {
valid = false; // two elements claim the same slot: not injective
break;
}
sumMin += mini;
assignment[start0 + i] = ass;
}
if (valid) {
#pragma omp atomic
value += sumMin;
return 1;
}
}
return 0;
}
// decompose a problem into subproblems in (quasi) linear time.
// Split the problem `p` into independent subproblems (pushed into `newp`)
// using the nearest-neighbour map `assNN`: collisions spill left/right via
// the taken/prev_free/next_free union-find-like chains, and maximal runs of
// occupied hist2 slots become one subproblem each.
// Returns false when the problem is too small to be worth splitting.
template<typename T>
bool linear_time_decomposition(const params &p, const T* hist1, const T* hist2, int* assNN, std::vector<params>& newp) {
if (p.end0 - p.start0 < 20) { // not worth splitting already tiny problems
return false;
}
int N = p.end1 - p.start1;
std::vector<int> taken(N, -1); // hist1 index occupying each hist2 slot, or -1
std::vector<int> ninj(N, 0); // how many hist1 elements NN-map to each slot
taken[assNN[p.start0] - p.start1] = p.start0;
ninj[assNN[p.start0] - p.start1]++;
std::vector<int> prev_free(p.end1 - p.start1);
std::vector<int> next_free(p.end1 - p.start1);
for (int i = 0; i < prev_free.size(); i++) {
prev_free[i] = i;
next_free[i] = i;
}
int first_right = assNN[p.start0 + 1] - p.start1, last_left = assNN[p.start0 + 1] - p.start1;
for (int i = p.start0 + 1; i < p.end0; i++) {
int ass = assNN[i];
int assOffset = ass - p.start1;
ninj[assOffset]++;
if ((taken[assOffset]) < 0) {
taken[assOffset] = i;
first_right = assOffset;
last_left = assOffset;
} else {
// slot already occupied: walk left for a free slot, then spill right
if (ninj[assOffset] > 1) {
int cur = last_left-1;
while (cur >=0) {
if (taken[cur] < 0 || cur == 0) {
taken[cur] = i;
prev_free[assOffset] = cur;
next_free[cur] = next_free[assOffset];
break;
} else {
if (prev_free[cur] == cur) cur--; else
cur = prev_free[cur];
}
}
last_left = std::max(0, cur);
} else {
prev_free[assOffset] = prev_free[prev_free[assOffset]]; // path compression
}
if (first_right < N - 1) {
first_right++;
}
taken[first_right] = i;
prev_free[first_right] = last_left;
next_free[assOffset] = first_right;
next_free[last_left] = first_right;
}
}
// turn each maximal occupied run of hist2 slots into one subproblem
int lastStart = p.start0;
for (int i = p.start1; i < p.end1; i++) {
int assOffset = i - p.start1;
int maxival = taken[assOffset];
if (taken[assOffset] >= 0) {
params curp;
if (next_free[assOffset] == assOffset) {
// isolated slot: 1-to-1 subproblem
curp.start1 = p.start1 + assOffset;
curp.start0 = lastStart;
lastStart++;
curp.end0 = curp.start0 + 1;
curp.end1 = curp.start1 + 1;
newp.push_back(curp);
} else {
int right = next_free[assOffset];
while (right<N-1 && next_free[right] != right) {
right = next_free[right];
}
for (int j = assOffset; j <= right; j++) {
maxival = std::max(maxival, taken[j]);
}
curp.start0 = lastStart;
curp.end0 = maxival + 1;
lastStart = curp.end0;
curp.start1 = p.start1 + assOffset;
curp.end1 = p.start1 + right + 1;
newp.push_back(curp);
i = p.start1 + right; // skip past this run
}
}
}
return true;
}
// Solve a subproblem greedily: like linear_time_decomposition, but when a
// nearest-neighbour collision occurs it compares the cost of pushing the
// whole run one slot left (cmL) vs. keeping it and placing the newcomer one
// slot right (cdM), choosing the cheaper. Final runs are then assigned
// order-preservingly and their cost added to `value`.
// NOTE(review): unlike the other solvers, `value` is updated without an
// omp atomic here — callers appear to invoke this inside an omp-for where
// `value` is shared; verify the reduction is safe at the call site.
template<typename T>
void simple_solve(const params &p, const T* hist1, const T* hist2, int* assignment, int* assNN, T &value) {
int N = p.end1 - p.start1;
std::vector<int> taken(N, -1);
std::vector<int> ninj(N, 0);
taken[assNN[p.start0] - p.start1] = p.start0;
ninj[assNN[p.start0] - p.start1]++;
std::vector<int> prev_free(p.end1 - p.start1);
std::vector<int> next_free(p.end1 - p.start1);
std::vector<T> cost_dontMove(p.end1 - p.start1, 0); // cost of the run if it stays
std::vector<T> cost_moveLeft(p.end1 - p.start1, 0); // cost of the run shifted one left
int ass0 = assNN[p.start0];
bool lastok = true;
cost_dontMove[assNN[p.start0] - p.start1] = cost(hist1[p.start0], hist2[ass0]);
cost_moveLeft[assNN[p.start0] - p.start1] = (ass0==0)?std::numeric_limits<T>::max():cost(hist1[p.start0], hist2[ass0 -1]);
for (int i = 0; i < prev_free.size(); i++) {
prev_free[i] = i;
next_free[i] = i;
}
int first_right = assNN[p.start0 + 1] - p.start1, last_left = assNN[p.start0 + 1] - p.start1;
for (int i = p.start0 + 1; i < p.end0; i++) {
int ass = assNN[i];
int assOffset = ass - p.start1;
ninj[assOffset]++;
if ((taken[assOffset]) < 0) {
// free slot: take it and cache both candidate costs
taken[assOffset] = i;
first_right = assOffset;
last_left = assOffset;
cost_dontMove[assOffset] = cost(hist1[i], hist2[ass]);
cost_moveLeft[assOffset] = (ass == 0) ? std::numeric_limits<T>::max() : cost(hist1[i], hist2[ass-1]);
lastok = true;
} else {
// collision: accumulate run costs while walking left to the nearest free slot
T sumDontMove = 0;
T sumMoveLeft = 0;
int cur = prev_free[first_right] - 1;
bool isok = true;
while (cur >= 0) {
sumDontMove += cost_dontMove[cur + 1];
if (cost_moveLeft[cur + 1] < 0) isok = false; // invalidated entry: must recompute
sumMoveLeft += cost_moveLeft[cur + 1];
if (taken[cur] < 0 ) {
break;
} else {
if (prev_free[cur] == cur) {
cur--;
} else {
cur = prev_free[cur]-1;
}
}
}
T cdM;
T cmL;
if (first_right >= N - 1)
cdM = std::numeric_limits<T>::max(); // cannot spill right past the end
else
cdM = sumDontMove + cost(hist1[i], hist2[p.start1 + first_right + 1]);
if (cur < 0) {
cmL = std::numeric_limits<T>::max(); // cannot shift left past the start
} else {
if (isok)
cmL = sumMoveLeft + cost(hist1[i], hist2[p.start1 + first_right]);
else {
// cached left-shift costs stale: recompute the run cost directly
cmL = 0;
if (cur >= 0 && first_right < N - 1) {
cmL = sumCosts(hist1, i - (first_right - cur), hist2, p.start1 + cur + 1 - 1, first_right-cur+1);
}
}
}
if (cmL < cdM || (first_right >= N - 1)) {
// shifting the run one slot left is cheaper
last_left = std::max(0, cur);
taken[last_left] = i;
prev_free[assOffset] = prev_free[last_left];
prev_free[first_right] = prev_free[last_left];
next_free[last_left] = next_free[first_right];
lastok = false;
cost_dontMove[last_left] = cmL;
cost_moveLeft[last_left] = -1; // we invalidate this value
}
else {
// keeping the run and spilling the newcomer right is cheaper
first_right++;
taken[first_right] = i;
prev_free[first_right] = prev_free[cur+1];
prev_free[assOffset] = prev_free[cur + 1];
next_free[assOffset] = next_free[first_right];
next_free[cur + 1] = next_free[first_right];
cost_dontMove[cur + 1] = cdM;
cost_moveLeft[cur + 1] = cmL;
lastok = true;
}
}
}
// emit order-preserving assignments for each occupied run and tally cost
int lastStart = p.start0;
for (int i = p.start1; i < p.end1; i++) {
int assOffset = i - p.start1;
int maxival = taken[assOffset];
if (taken[assOffset] >= 0) {
params curp;
if (next_free[assOffset] == assOffset) {
curp.start1 = p.start1 + assOffset;
curp.start0 = lastStart;
lastStart++;
curp.end0 = curp.start0 + 1;
curp.end1 = curp.start1 + 1;
for (int j = 0; j < curp.end0 - curp.start0; j++) {
assignment[curp.start0 + j] = curp.start1 + j;
value += cost(hist1[curp.start0 + j], hist2[curp.start1 + j]);
}
}
else {
int right = next_free[assOffset];
while (right < N - 1 && next_free[right] != right) {
right = next_free[right];
}
for (int j = assOffset; j <= right; j++) {
maxival = std::max(maxival, taken[j]);
}
curp.start0 = lastStart;
curp.end0 = maxival + 1;
lastStart = curp.end0;
curp.start1 = p.start1 + assOffset;
curp.end1 = p.start1 + right + 1;
for (int j = 0; j < curp.end0 - curp.start0; j++) {
assignment[curp.start0 + j] = curp.start1 + j;
value += cost(hist1[curp.start0 + j], hist2[curp.start1 + j]);
}
i = p.start1 + right;
}
}
}
}
// Optimal 1-d partial transport of the M0 sorted values hist1 onto the N0
// sorted values hist2 (M0 <= N0). Fills `assignment` (hist1 index -> hist2
// index) and returns the total cost. Pipeline: NN match -> range reduction ->
// linear-time decomposition -> per-subproblem simple cases / greedy solve,
// with subproblems processed in an omp parallel for.
// NOTE(review): `timingSplits` is accepted but never written here.
template<typename T>
T transport1d(const T *hist1, const T* hist2, int M0, int N0, std::vector<int> &assignment, double* timingSplits = NULL) {
assignment.resize(M0);
params initp(0, M0, 0, N0, 0);
T value = 0;
// starts computing nearest neighbor match
std::vector<int> assNN(M0);
nearest_neighbor_match(hist1, hist2, initp, assNN);
// computes the number of non-injective matches in an interval
int nbbij = 0;
for (int i = initp.start0 + 1; i < initp.end0; i++) {
if (assNN[i] == assNN[i - 1]) nbbij++;
}
int ret1 = reduce_range(hist1, hist2, assignment, initp, value, &assNN[0], nbbij);
if (ret1 == 1) return value;
nearest_neighbor_match(hist1, hist2, initp, assNN); // since the bounds of the problem have changed, the NN maps has changed as well
std::vector<params> splits;
bool res = linear_time_decomposition(initp, hist1, hist2, &assNN[0], splits);
std::vector<params > todo;
if (res) {
todo.reserve(splits.size());
for (int i = 0; i < splits.size(); i++) {
if (splits[i].end0 == splits[i].start0 + 1) { // we directly handle problems of size 1 here
assignment[splits[i].start0] = assNN[splits[i].start0];
value += cost(hist1[splits[i].start0], hist2[assNN[splits[i].start0]]);
}
else
todo.push_back(splits[i]);
}
}
else {
todo.push_back(initp);
}
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < todo.size(); i++) {
params p = todo[i];
nearest_neighbor_match(hist1, hist2, p, assNN); // since the bounds of the problem have changed, the NN maps has changed as well
int ret = handle_simple_cases(p, hist1, hist2, &assignment[0], &assNN[0], value);
if (ret == 1) continue;
int nbbij = 0; // shadows the outer nbbij: per-subproblem count
for (int i = p.start0 + 1; i < p.end0; i++) {
if (assNN[i] == assNN[i - 1]) nbbij++;
}
ret = reduce_range(hist1, hist2, assignment, p, value, &assNN[0], nbbij);
if (ret == 1) continue;
ret = handle_simple_cases(p, hist1, hist2, &assignment[0], &assNN[0], value);
if (ret == 1) continue;
nearest_neighbor_match(hist1, hist2, p, assNN); // since the bounds of the problem have changed, the NN maps has changed as well
simple_solve(p, hist1, hist2, &assignment[0], &assNN[0], value);
}
return value;
}
// advect = true : used for matching one distrib to another such as in our FIST algorithm : this function will advect cloud1 to cloud2 along a sliced wasserstein flow
// advect = false : used to compute barycenters or sliced EMD (we don't perform any stochastic gradient descent then, this will merely compute the sliced wasserstein distance)
// advect = true : used for matching one distrib to another such as in our FIST algorithm : this function will advect cloud1 to cloud2 along a sliced wasserstein flow
// advect = false : used to compute barycenters or sliced EMD (we don't perform any stochastic gradient descent then, this will merely compute the sliced wasserstein distance)
// Returns the sliced distance estimate averaged over `niter` random slices
// (scaled by 2/niter at the end).
template<int DIM, typename T>
double correspondencesNd(std::vector<Point<DIM, T> > &cloud1, const std::vector<Point<DIM, T> > &cloud2, int niter, bool advect = false) {
// we won't use the indices here for the moment nor the assignment, so if memory is an issue, we can remove the variables below
std::vector<std::pair<T, int > > cloud1Idx(cloud1.size());
std::vector<std::pair<T, int > > cloud2Idx(cloud2.size());
Point<DIM, T> dir;
std::vector<Point<DIM, T> > deltas(cloud1.size(), Point<DIM, T>());
T* projHist1 = (T*)malloc_simd(cloud1.size() * sizeof(T), 32);
T* projHist2 = (T*)malloc_simd(cloud2.size() * sizeof(T), 32);
engine.seed(10); // fixed seed: deterministic slice directions
std::vector<int> corr1d;
double d = 0;
for (int iter = 0; iter < niter; iter++) { // number of random slices
// random directions: pairs of Gaussian coordinates, then normalize
double n = 0;
for (int i = 0; i < DIM; i+=2) {
Point<2, double> randGauss = BoxMuller<double>();
dir[i] = randGauss[0];
n += dir[i] * dir[i];
if (i < DIM-1) {
dir[i+1] = randGauss[1];
n += dir[i+1] * dir[i+1];
}
}
n = std::sqrt(n);
for (int i = 0; i < DIM; i++) {
dir[i] /= n;
}
// sort according to projection on direction
Projector<DIM, T> proj(dir);
for (int i = 0; i < cloud1.size(); i++) {
cloud1Idx[i] = std::make_pair(proj.proj(cloud1[i]), i);
}
for (int i = 0; i < cloud2.size(); i++) {
cloud2Idx[i] = std::make_pair(proj.proj(cloud2[i]), i);
}
// sort the two projections concurrently (one on a helper thread)
std::thread mythread( [&]{std::sort(cloud1Idx.begin(), cloud1Idx.end()); } );
std::sort(cloud2Idx.begin(), cloud2Idx.end());
mythread.join();
for (int i = 0; i < cloud1.size(); i++) {
projHist1[i] = cloud1Idx[i].first;
}
for (int i = 0; i < cloud2.size(); i++) {
projHist2[i] = cloud2Idx[i].first;
}
T emd = transport1d(projHist1, projHist2, cloud1.size(), cloud2.size(), corr1d);
d += emd;
if (advect) {
// move each cloud1 point along `dir` toward its 1-d match
for (int i = 0; i < cloud1Idx.size(); i++) {
for (int j = 0; j < DIM; j++) {
cloud1[cloud1Idx[i].second][j] += (projHist2[corr1d[i]] - projHist1[i])*dir[j];
}
}
}
}
free_simd(projHist1);
free_simd(projHist2);
return d*2.0/niter;
}
template<int DIM, typename T> // Mbary should be less than min_i(bary[i].size())
// Sliced-Wasserstein barycenter of several weighted point clouds: `niters`
// outer gradient steps, each averaging per-slice 1-d transport displacements
// over `nslices` fixed directions (parallelized over slices with OpenMP;
// per-thread scratch buffers are indexed by omp_get_thread_num()).
// Result (Mbary points) is written into `barycenter`.
void unbalanced_barycenter(int Mbary, int niters, int nslices, const std::vector<T> &weights, const std::vector< std::vector<Point<DIM, T> > > &points, std::vector<Point<DIM, T> > &barycenter) {
auto start = std::chrono::system_clock::now(); // NOTE(review): `start` is never used afterwards
barycenter.resize(Mbary);
// initialize the barycenter with the first cloud's first Mbary points
for (int i = 0; i < Mbary; i++) {
for (int j = 0; j < DIM; j++) {
barycenter[i][j] = points[0][i][j]; //(rand() / (double)RAND_MAX)* 512.0; // //(rand() / (double)RAND_MAX)* 512.0;
}
}
// a fixed set of slice directions across iterations (might be rotated)
// random direction for DIM>2, else equispaced
srand(10);
engine.seed(10);
std::vector<Point<DIM, T> > dirs(nslices);
for (int slice = 0; slice < nslices; slice++) {
if (DIM == 2) {
double theta = slice*M_PI / nslices;
dirs[slice][0] = cos(theta);
dirs[slice][1] = sin(theta);
} else {
double n = 0;
for (int i = 0; i < DIM; i+=2) {
Point<2, double> randGauss = BoxMuller<double>();
dirs[slice][i] = randGauss[0];
n += dirs[slice][i] * dirs[slice][i];
if (i < DIM - 1) {
dirs[slice][i + 1] = randGauss[1];
n += dirs[slice][i + 1] * dirs[slice][i + 1];
}
}
n = std::sqrt(n);
for (int i = 0; i < DIM; i++) {
dirs[slice][i] /= n;
}
}
}
// per-thread aligned projection buffers for the barycenter
std::vector<T*> projHist1(omp_get_max_threads());
for (int i=0; i< omp_get_max_threads() ; i++)
projHist1[i] = (T*)malloc_simd(barycenter.size() * sizeof(T), 32);
std::vector<std::vector<std::pair<T, int > > > cloud1Idx(omp_get_max_threads(), std::vector<std::pair<T, int > >(Mbary));
std::vector<std::vector<std::pair<T, int > > > cloud2Idx(omp_get_max_threads());
for (int iter = 0; iter < niters; iter++) {
double d = 0;
std::vector<Point<DIM, T> > offset(barycenter.size()); // NOTE(review): `offset` is never used
std::vector<Point<DIM, T> > newbary = barycenter;
for (int cloud = 0; cloud < points.size(); cloud++) {
#pragma omp parallel
{
int thread_num = omp_get_thread_num();
cloud2Idx[thread_num].resize(points[cloud].size());
T* projHist2 = (T*)malloc_simd(points[cloud].size() * sizeof(T), 32);
std::vector<int> corr1d;
double local_d = 0;
#pragma omp for schedule(dynamic)
for (int slice = 0; slice < nslices; slice++) { // number of random slices
Point<DIM, T> dir = dirs[slice];
// sort according to projection on direction
Projector<DIM, T> proj(dir);
for (int i = 0; i < Mbary; i++) {
cloud1Idx[thread_num][i] = std::make_pair(proj.proj(barycenter[i]), i);
}
for (int i = 0; i < points[cloud].size(); i++) {
cloud2Idx[thread_num][i] = std::make_pair(proj.proj(points[cloud][i]), i);
}
// sort barycenter projections on a helper thread while sorting the cloud's
std::thread mythread( [&]{
std::sort(cloud1Idx[thread_num].begin(), cloud1Idx[thread_num].end());
for (int i = 0; i < Mbary; i++) {
projHist1[thread_num][i] = cloud1Idx[thread_num][i].first;
}
});
std::sort(cloud2Idx[thread_num].begin(), cloud2Idx[thread_num].end());
for (int i = 0; i < points[cloud].size(); i++) {
projHist2[i] = cloud2Idx[thread_num][i].first;
}
mythread.join();
transport1d(projHist1[thread_num], projHist2, Mbary, points[cloud].size(), corr1d);
for (int i = 0; i < corr1d.size(); i++) {
local_d += weights[cloud] * cost(projHist1[thread_num][i], projHist2[corr1d[i]]);
}
// accumulate the weighted displacement into the new barycenter
#pragma omp critical
{
for (int i = 0; i < cloud1Idx[thread_num].size(); i++) {
int perm = cloud1Idx[thread_num][i].second;
for (int j = 0; j < DIM; j++) {
newbary[perm][j] += DIM * (weights[cloud] * (projHist2[corr1d[i]] - projHist1[thread_num][i])*dir[j]) / nslices;
}
}
}
}
free_simd(projHist2);
#pragma omp atomic
d += local_d;
}
}
barycenter = newbary;
}
for (int i=0; i<omp_get_max_threads(); i++)
free_simd(projHist1[i]);
}
// transport-based ICP, using either a rigid transform (scaling = false) or similarity transform (scaling = true)
// transport-based ICP, using either a rigid transform (scaling = false) or similarity transform (scaling = true)
// Each iteration: advect a copy of pointsSrc toward pointsDst along sliced-OT
// flow, then fit the best rotation (+ optional scale) between the original
// and advected clouds via SVD of the cross-covariance (Kabsch-style), apply
// it to pointsSrc, and accumulate the global rot/trans/scaling outputs.
template<int DIM, typename T>
void fast_iterative_sliced_transport(int niters, int nslices, std::vector<Point<DIM, T> > &pointsSrc, const std::vector<Point<DIM, T> > &pointsDst, std::vector<double> &rot, std::vector<double> &trans, bool useScaling, double &scaling) {
rot.resize(DIM*DIM);
trans.resize(DIM);
scaling = 1;
// start from the identity transform
std::fill(rot.begin(), rot.end(), 0);
for (int i = 0; i < DIM; i++)
rot[i*DIM + i] = 1;
std::fill(trans.begin(), trans.end(), 0);
for (int iter = 0; iter < niters; iter++) {
std::vector<Point<DIM, T> > pointsSrcCopy(pointsSrc);
correspondencesNd(pointsSrcCopy, pointsDst, nslices, true); // advect the copy
Point<DIM, T> center1, center2;
for (int i = 0; i < pointsSrc.size(); i++) {
center1 += pointsSrc[i];
center2 += pointsSrcCopy[i]; // pointsSrcCopy and pointsSrc have the same size
}
center1 *= (1.0 / pointsSrc.size());
center2 *= (1.0 / pointsSrc.size());
// cross-covariance of the centered clouds
double cov[DIM*DIM];
memset(cov, 0, DIM*DIM * sizeof(cov[0]));
for (int i = 0; i < pointsSrc.size(); i++) {
Point<DIM, T> p = pointsSrc[i] - center1;
Point<DIM, T> q = pointsSrcCopy[i] - center2;
for (int j = 0; j < DIM; j++) {
for (int k = 0; k < DIM; k++) {
cov[j * DIM + k] += q[j] * p[k];
}
}
}
cimg_library::CImg<double> mat(cov, DIM, DIM), S(1,DIM), U(DIM, DIM), V(DIM, DIM), orth(DIM,DIM), diag(DIM, DIM,1,1,0.0), rotM(DIM, DIM);
mat.SVD(U, S, V, true, 100, 0.0);
orth = U * V.get_transpose();
double d = orth.det();
// flip the last axis if needed so the result is a proper rotation (det=+1)
for (int i = 0; i < DIM - 1; i++) {
diag(i, i) = 1;
}
diag(DIM - 1, DIM - 1) = d;
double scal = 1;
if (useScaling) {
double std = 0;
for (int i = 0; i < pointsSrc.size(); i++) {
std += (pointsSrc[i]-center1).norm2();
}
double s2 = 0;
for (int i = 0; i < DIM; i++) {
s2 += std::abs(S(0,i));
}
scal = s2 / std;
scaling *= scal;
}
rotM = U * diag*V.get_transpose();
// NOTE(review): `&center1[0]` / `&center2[0]` below were garbled to `¢er1[0]` / `¢er2[0]`
// in the extracted source (HTML entity damage); restored here.
cimg_library::CImg<double> rotG(const_cast<double*>(&rot[0]), DIM, DIM,1,1,true), transG(const_cast<double*>(&trans[0]), 1,DIM,1,1,true), C1(&center1[0], 1, DIM), C2(&center2[0], 1, DIM);
rotG = rotM*rotG;
transG = transG + C2 - C1;
for (int i = 0; i < pointsSrc.size(); i++) {
cimg_library::CImg<T> P(const_cast<T*>(&pointsSrc[i][0]), 1,DIM,1,1,true);
P = scal*(rotM * (P - C1)) + C2;
}
}
cimg_library::CImg<double> rotG(const_cast<double*>(&rot[0]), DIM, DIM, 1, 1, true), transG(const_cast<double*>(&trans[0]), 1, DIM, 1, 1, true);
if (useScaling)
transG = scaling*rotG * transG;
else
transG = rotG * transG;
}
}; // end class
|
collapse.c | #include <stdio.h>
#include <omp.h>
/* Matrix dimensions for the demo: M rows by N columns. */
enum {
M = 2,
N = 5
};
/* M x N matrix stored row-major in a flat array; filled by the fun* demos. */
float m[M * N];
void fun1()
{
	/* 1D work sharing over rows: each thread fills whole horizontal strips. */
	#pragma omp parallel
	{
		#pragma omp for
		for (int row = 0; row < M; row++) {
			for (int col = 0; col < N; col++) {
				m[row * N + col] = row * col;
			}
		}
	}
}
void fun2()
{
	/* 1D work sharing over columns: for every row the threads split the
	 * inner loop (vertical strips). */
	#pragma omp parallel
	{
		printf("Threads %d\n", omp_get_num_threads());
		for (int row = 0; row < M; row++) {
			#pragma omp for
			for (int col = 0; col < N; col++) {
				m[row * N + col] = row * col;
			}
		}
	}
}
void fun3()
{
	/* collapse(2) fuses both loops into one M*N iteration space
	 * before distributing it across the team. */
	#pragma omp parallel
	{
		printf("Threads %d\n", omp_get_num_threads());
		#pragma omp for collapse(2)
		for (int row = 0; row < M; row++) {
			for (int col = 0; col < N; col++) {
				printf("%d [%d, %d]\n", omp_get_thread_num(), row, col);
				m[row * N + col] = row * col;
			}
		}
	}
}
void fun4()
{
	/* Manual collapse: flatten the 2D iteration space by hand and recover
	 * (row, col) from the linear index. With 4 threads the 10 iterations
	 * split e.g. 3 3 2 2. */
	#pragma omp parallel
	{
		printf("Threads %d\n", omp_get_num_threads());
		#pragma omp for
		for (int idx = 0; idx < M * N; idx++) {
			int row = idx / N;
			int col = idx % N;
			printf("%d [%d, %d]\n", omp_get_thread_num(), row, col);
			m[row * N + col] = row * col;
		}
	}
}
/* Demonstrate collapse(2) versus the hand-flattened equivalent loop. */
int main(int argc, char **argv)
{
    fun3();
    puts("---");
    fun4();
    return 0;
}
|
graph_impl.h | /*****************************************************************************
*
* ALPS/looper: multi-cluster quantum Monte Carlo algorithms for spin systems
*
* Copyright (C) 1997-2012 by Synge Todo <wistaria@comp-phys.org>
*
* This software is published under the ALPS Application License; you
* can use, redistribute it and/or modify it under the terms of the
* license, either version 1 or (at your option) any later version.
*
* You should have received a copy of the ALPS Application License
* along with this software; see the file LICENSE. If not, the license
* is also available from http://alps.comp-phys.org/.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef LOOPER_GRAPH_IMPL_H
#define LOOPER_GRAPH_IMPL_H
#include <alps/config.h>
#if defined(LOOPER_ENABLE_OPENMP) && defined(ALPS_ENABLE_OPENMP_WORKER) && !defined(LOOPER_OPENMP)
# define LOOPER_OPENMP
#endif
#include "graph.h"
#include "location_impl.h"
#ifdef HAVE_PARAPACK_13
# include <alps/math.hpp>
#else
# include <alps/numeric/is_nonzero.hpp>
# include <alps/numeric/is_zero.hpp>
#endif
#include <alps/random/random_choice.hpp>
#include <boost/array.hpp>
#include <boost/throw_exception.hpp>
#include <boost/tuple/tuple.hpp>
#include <stdexcept>
#ifdef LOOPER_OPENMP
# include <omp.h>
#endif
#ifdef HAVE_PARAPACK_13
using alps::is_nonzero;
using alps::is_zero;
#else
using alps::numeric::is_nonzero;
using alps::numeric::is_zero;
#endif
namespace looper {
//
// site graph type
//
// g = 0 (vertically disconnected)
//
struct site_graph_type {
  // Only graph id 0 (vertically disconnected) exists for sites.
  static bool is_valid_gid(int g) { return g == 0; }
  // A site graph is compatible with any local spin state.
  static bool is_compatible(int /* g */, int /* c */) { return true; }
  static bool is_frozen(int /* g */) { return false; }
  // obsolete interface: cut the world line, opening a fresh fragment
  template<class T>
  static boost::tuple<int /* curr */, int /* loop0 */, int /* loop1 */>
  reconnect(int /* g */, std::vector<T>& fragments, int curr) {
    const int fresh = add(fragments);
    return boost::make_tuple(fresh, curr, fresh);
  }
  // current interface: reuse the preallocated slot fid instead of growing
  template<class T>
  static boost::tuple<int /* fid */, int /* curr */, int /* loop0 */, int /* loop1 */>
  reconnect(int /* g */, std::vector<T>& fragments, int fid, int curr) {
    fragments[fid] = T();
    const int fresh = fid++;
    return boost::make_tuple(fid, fresh, curr, fresh);
  }
};
//
// bond graph types
//
// g = 0 o.o g = 1 o o
// X
// o.o o o
//
// g = 2 o.o g = 3 o-o
// |*| |X|
// o.o o-o
//
// g = 4 o o g = 5 o-o
// *
// o o o-o
// optimized bond_graph_type for Ising model
struct ising_bond_graph_type {
  // Only the frozen graphs exist: g = 2 (antiparallel) and g = 3 (parallel).
  static bool is_valid_gid(int g) { return g == 2 || g == 3; }
  // The low bit of g must match the parity of the spin pair (c0, c1).
  static bool is_compatible(int g, int c0, int c1) { return (g & 1) ^ c0 ^ c1; }
  // Every Ising graph freezes the bond: both legs join one cluster.
  static bool is_frozen(int /* g */) { return true; }
  // obsolete interface
  template<class T>
  static boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int curr0, int curr1) {
    // Merge the two incoming clusters; all four outgoing labels coincide.
    int loop = unify(fragments, curr0, curr1);
    return boost::make_tuple(loop, loop, loop, loop);
  }
  // current interface
  template<class T>
  static boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */,
    int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int fid, int curr0, int curr1) {
    // No fresh fragment is consumed, so fid passes through unchanged.
    int loop = unify(fragments, curr0, curr1);
    return boost::make_tuple(fid, loop, loop, loop, loop);
  }
  static bool has_nontrivial_diagonal_choice_helper() { return false; }
  // Stateless placeholder: Ising needs no weight table for graph selection.
  struct diagonal_choice_helper {
    diagonal_choice_helper() {}
    template<std::size_t N>
    diagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_diagonal(RNG& rng, std::vector<diagonal_choice_helper> const&, int,
    int c0, int c1) {
    // Deterministic: parallel spins -> g = 3, antiparallel -> g = 2.
    return (c0 == c1) ? 3 : 2;
  }
  static bool has_nontrivial_offdiagonal_choice_helper() { return false; }
  struct offdiagonal_choice_helper {
    offdiagonal_choice_helper() {}
    template<std::size_t N>
    offdiagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_offdiagonal(RNG&, std::vector<offdiagonal_choice_helper> const&, int, int,
    int) {
    // Ising has no off-diagonal matrix elements; reaching here is a logic error.
    boost::throw_exception(std::logic_error("ising_bond_graph_type::choose_offdiagonal"));
    return 0;
  }
};
// optimized bond_graph_type for Heisenberg Antiferromagnet
struct haf_bond_graph_type {
  // Only graph id 0 (horizontal connection) is generated for AF bonds.
  static bool is_valid_gid(int g) { return g == 0; }
  // Compatible only when the two spins are antiparallel.
  static bool is_compatible(int /* g */, int c0, int c1) { return c0 != c1; }
  static bool is_frozen(int /* g */) { return false; }
  // obsolete interface
  template<class T>
  static boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int curr0, int curr1) {
    // Close the incoming loop pair, then open a fresh fragment carrying both lines.
    int loop0 = unify(fragments, curr0, curr1);
    int loop1 = curr0 = curr1 = add(fragments);
    return boost::make_tuple(curr0, curr1, loop0, loop1);
  }
  // current interface
  template<class T>
  static boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */,
    int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int fid, int curr0, int curr1) {
    // Reuse the preallocated slot fid instead of growing the fragment vector.
    fragments[fid] = T();
    int loop0 = unify(fragments, curr0, curr1);
    int loop1 = curr0 = curr1 = fid;
    ++fid;
    return boost::make_tuple(fid, curr0, curr1, loop0, loop1);
  }
  static bool has_nontrivial_diagonal_choice_helper() { return false; }
  // Stateless placeholder: only one diagonal graph exists, no table needed.
  struct diagonal_choice_helper {
    diagonal_choice_helper() {}
    template<std::size_t N>
    diagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_diagonal(RNG&, std::vector<diagonal_choice_helper> const&, int, int, int) {
    return 0;
  }
  static bool has_nontrivial_offdiagonal_choice_helper() { return false; }
  struct offdiagonal_choice_helper {
    offdiagonal_choice_helper() {}
    template<std::size_t N>
    offdiagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_offdiagonal(RNG&, std::vector<offdiagonal_choice_helper> const&, int, int,
    int) {
    return 0;
  }
};
// optimized bond_graph_type for Heisenberg Ferromagnet
struct hf_bond_graph_type {
  // Only graph id 1 (cross connection) is generated for ferromagnetic bonds.
  static bool is_valid_gid(int g) { return g == 1; }
  // Compatible when the two spins are parallel.
  // Two-argument form kept for backward compatibility with existing callers.
  static bool is_compatible(int c0, int c1) { return c0 == c1; }
  // Three-argument overload matching every other bond_graph_type and the
  // call made by local_graph<>::is_compatible(type_, c0, c1); without it
  // local_graph cannot be instantiated with hf_bond_graph_type.
  static bool is_compatible(int /* g */, int c0, int c1) { return c0 == c1; }
  static bool is_frozen(int /* g */) { return false; }
  // obsolete interface
  template<class T>
  static boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int curr0, int curr1) {
    // A cross graph swaps the two world lines; no fragment is created or merged.
    int loop0 = curr0;
    int loop1 = curr1;
    std::swap(curr0, curr1);
    return boost::make_tuple(curr0, curr1, loop0, loop1);
  }
  // current interface
  template<class T>
  static boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */,
    int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int fid, int curr0, int curr1) {
    // fid passes through unchanged: the cross graph consumes no fragment slot.
    int loop0 = curr0;
    int loop1 = curr1;
    std::swap(curr0, curr1);
    return boost::make_tuple(fid, curr0, curr1, loop0, loop1);
  }
  static bool has_nontrivial_diagonal_choice_helper() { return false; }
  // Stateless placeholder: only one diagonal graph exists, no table needed.
  struct diagonal_choice_helper {
    diagonal_choice_helper() {}
    template<std::size_t N>
    diagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_diagonal(RNG&, std::vector<diagonal_choice_helper> const&, int, int, int) {
    return 1;
  }
  static bool has_nontrivial_offdiagonal_choice_helper() { return false; }
  struct offdiagonal_choice_helper {
    offdiagonal_choice_helper() {}
    template<std::size_t N>
    offdiagonal_choice_helper(boost::array<double, N> const&) {}
  };
  template<typename RNG>
  static int choose_offdiagonal(RNG&, std::vector<offdiagonal_choice_helper> const&, int, int,
    int) {
    return 1;
  }
};
// bond_graph_type for XXZ interaction
struct xxz_bond_graph_type {
  // Graphs 0-3 (see diagrams above) are used for XXZ interactions.
  static bool is_valid_gid(int g) { return g >= 0 && g <= 3; }
  // The low bit of g fixes the spin parity the graph can sit on.
  static bool is_compatible(int g, int c0, int c1) { return (g & 1) ^ c0 ^ c1; }
  // Graphs 2 and 3 freeze the bond.
  static bool is_frozen(int g) { return ((g & 2) == 2); }
  // obsolete interface
  template<class T>
  static boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int curr0, int curr1) {
    int loop0, loop1;
    if ((g & 2) == 2) {
      // frozen graph: both world lines join a single cluster
      loop0 = loop1 = curr0 = curr1 = unify(fragments, curr0, curr1);
    } else if (g == 0) {
      // horizontal cut: close the pair, open a fresh fragment
      loop0 = unify(fragments, curr0, curr1);
      loop1 = curr0 = curr1 = add(fragments);
    } else {
      // cross graph: swap the two world lines
      loop0 = curr0;
      loop1 = curr1;
      std::swap(curr0, curr1);
    }
    return boost::make_tuple(curr0, curr1, loop0, loop1);
  }
  // current interface
  template<class T>
  static boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */,
    int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int fid, int curr0, int curr1) {
    int loop0, loop1;
    if ((g & 2) == 2) {
      loop0 = loop1 = curr0 = curr1 = unify(fragments, curr0, curr1);
    } else if (g == 0) {
      // reuse the preallocated slot fid instead of growing the vector
      fragments[fid] = T();
      loop0 = unify(fragments, curr0, curr1);
      loop1 = curr0 = curr1 = fid;
      ++fid;
    } else {
      loop0 = curr0;
      loop1 = curr1;
      std::swap(curr0, curr1);
    }
    return boost::make_tuple(fid, curr0, curr1, loop0, loop1);
  }
  static bool has_nontrivial_diagonal_choice_helper() { return true; }
  // Per-bond probabilities of picking the non-frozen over the frozen graph.
  struct diagonal_choice_helper {
    diagonal_choice_helper() {}
    template<std::size_t N>
    diagonal_choice_helper(boost::array<double, N> const& v) {
      p[0] = is_nonzero<1>(v[0] + v[2]) ? v[0] / (v[0] + v[2]) : 1; // for antiparallel
      p[1] = is_nonzero<1>(v[1] + v[3]) ? v[1] / (v[1] + v[3]) : 1; // for parallel
    }
    boost::array<double, 2> p;
  };
  template<typename RNG>
  static int choose_diagonal(RNG& rng, std::vector<diagonal_choice_helper> const& helpers, int b,
    int c0, int c1) {
    int c = 1 ^ c0 ^ c1; // 0 for antiparallel, 1 for parallel,
    return (rng() < helpers[b].p[c] ? 0 : 2) ^ c;
  }
  static bool has_nontrivial_offdiagonal_choice_helper() { return true; }
  // Probability of graph 0 versus graph 1 for off-diagonal processes.
  struct offdiagonal_choice_helper {
    offdiagonal_choice_helper() {}
    template<std::size_t N>
    offdiagonal_choice_helper(boost::array<double, N> const& v) {
      p = is_nonzero<1>(v[0] + v[1]) ? v[0] / (v[0] + v[1]) : 1;
    }
    double p;
  };
  template<typename RNG>
  static int choose_offdiagonal(RNG& rng, std::vector<offdiagonal_choice_helper> const& helpers,
    int b, int /* c0 */, int /* c1 */) {
    return rng() < helpers[b].p ? 0 : 1;
  }
};
// bond_graph_type for XYZ interaction
struct xyz_bond_graph_type {
  // Graphs 0-5 (see diagrams above); 4 and 5 arise from the XY anisotropy.
  static bool is_valid_gid(int g) { return g >= 0 && g <= 5; }
  // The low bit of g fixes the spin parity the graph can sit on.
  static bool is_compatible(int g, int c0, int c1) { return (g & 1) ^ c0 ^ c1; }
  // Graphs 2 and 3 freeze the bond.
  static bool is_frozen(int g) { return ((g & 2) == 2); }
  // obsolete interface
  template<typename T>
  static boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int curr0, int curr1) {
    int loop0, loop1;
    if ((g & 2) == 2) {
      // frozen graph: both world lines join a single cluster
      loop0 = loop1 = curr0 = curr1 = unify(fragments, curr0, curr1);
    } else if (g == 0 || g == 5) {
      // horizontal cut: close the pair, open a fresh fragment
      loop0 = unify(fragments, curr0, curr1);
      loop1 = curr0 = curr1 = add(fragments);
    } else {
      // cross graph: swap the two world lines
      loop0 = curr0;
      loop1 = curr1;
      std::swap(curr0, curr1);
    }
    return boost::make_tuple(curr0, curr1, loop0, loop1);
  }
  // current interface
  template<typename T>
  static boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */,
    int /* loop1 */>
  reconnect(int g, std::vector<T>& fragments, int fid, int curr0, int curr1) {
    int loop0, loop1;
    if ((g & 2) == 2) {
      loop0 = loop1 = curr0 = curr1 = unify(fragments, curr0, curr1);
    } else if (g == 0 || g == 5) {
      // reuse the preallocated slot fid instead of growing the vector
      fragments[fid] = T();
      loop0 = unify(fragments, curr0, curr1);
      loop1 = curr0 = curr1 = fid;
      ++fid;
    } else {
      loop0 = curr0;
      loop1 = curr1;
      std::swap(curr0, curr1);
    }
    return boost::make_tuple(fid, curr0, curr1, loop0, loop1);
  }
  static bool has_nontrivial_diagonal_choice_helper() { return true; }
  // Cumulative probabilities for choosing among the three diagonal graphs
  // of each parity sector.
  struct diagonal_choice_helper {
    diagonal_choice_helper() {}
    template<std::size_t N>
    diagonal_choice_helper(boost::array<double, N> const& v) {
      // for antiparallel
      p[0] = is_nonzero<1>(v[0] + v[2] + v[4]) ? v[0] / (v[0] + v[2] + v[4]) : 1;
      p[2] = is_nonzero<1>(v[0] + v[2] + v[4]) ? (v[0] + v[2])/ (v[0] + v[2] + v[4]) : 1;
      // for parallel
      p[1] = is_nonzero<1>(v[1] + v[3] + v[5]) ? v[1] / (v[1] + v[3] + v[5]) : 1;
      p[3] = is_nonzero<1>(v[1] + v[3] + v[5]) ? (v[1] + v[3]) / (v[1] + v[3] + v[5]) : 1;
    }
    boost::array<double, 4> p;
  };
  template<typename RNG>
  static int choose_diagonal(RNG& rng, std::vector<diagonal_choice_helper> const& helpers, int b,
    int c0, int c1) {
    // Single random draw compared against the cumulative table.
    double r = rng();
    if (c0 != c1) {
      if (r < helpers[b].p[0])
        return 0;
      else if (r < helpers[b].p[2])
        return 2;
      else
        return 4;
    } else {
      if (r < helpers[b].p[1])
        return 1;
      else if (r < helpers[b].p[3])
        return 3;
      else
        return 5;
    }
  }
  static bool has_nontrivial_offdiagonal_choice_helper() { return true; }
  // Off-diagonal choice between graphs {0,1} (antiparallel) or {4,5} (parallel).
  struct offdiagonal_choice_helper {
    offdiagonal_choice_helper() {}
    template<std::size_t N>
    offdiagonal_choice_helper(boost::array<double, N> const& v) {
      p[0] = is_nonzero<1>(v[0] + v[1]) ? v[0] / (v[0] + v[1]) : 1;
      p[1] = is_nonzero<1>(v[4] + v[5]) ? v[4] / (v[4] + v[5]) : 1;
    }
    boost::array<double, 2> p;
  };
  template<typename RNG>
  static int choose_offdiagonal(RNG& rng, std::vector<offdiagonal_choice_helper> const& helpers,
    int b, int c0, int c1) {
    double r = rng();
    if (c0 != c1)
      return (r < helpers[b].p[0] ? 0 : 1);
    else
      return (r < helpers[b].p[1] ? 4 : 5);
  }
};
// A local graph assigned to an operator: a graph type id plus an operator
// location. Compatibility and reconnection are dispatched to the site or
// bond graph policy class depending on where the operator sits.
template<typename SITE, typename BOND, typename LOC>
class local_graph {
public:
  typedef SITE site_graph_t;
  typedef BOND bond_graph_t;
  typedef LOC location_t;
  local_graph() {}
  local_graph(int type, const location_t& loc) : type_(type), loc_(loc) {}
  int type() const { return type_; }
  // site overload; debug builds verify the location really is a site
  bool is_compatible(int c) const {
#ifndef NDEBUG
    if (!is_site())
      boost::throw_exception(std::logic_error("local_graph<>::is_compatible"));
#endif
    return site_graph_t::is_compatible(type_, c);
  }
  // bond overload; debug builds verify the location really is a bond
  bool is_compatible(int c0, int c1) const {
#ifndef NDEBUG
    if (!is_bond())
      boost::throw_exception(std::logic_error("local_graph<>::is_compatible"));
#endif
    return bond_graph_t::is_compatible(type_, c0, c1);
  }
  const location_t& loc() const { return loc_; }
  int pos() const { return loc_.pos(); }
  int source() const { return loc_.source(); }
  int target() const { return loc_.target(); }
  bool is_bond() const { return loc_.is_bond(); }
  bool is_site() const { return loc_.is_site(); }
  // obsolete interface
  template<class T>
  boost::tuple<int /* curr */, int /* loop0 */, int /* loop1 */>
  reconnect(std::vector<T>& fragments, int curr) const {
    return site_graph_t::reconnect(type_, fragments, curr);
  }
  // current interface
  template<class T>
  boost::tuple<int /* fid */, int /* curr */, int /* loop0 */, int /* loop1 */>
  reconnect_site(std::vector<T>& fragments, int fid, int curr) const {
    return site_graph_t::reconnect(type_, fragments, fid, curr);
  }
  // obsolete interface
  template<class T>
  boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect(std::vector<T>& fragments, int curr0, int curr1) const {
    return bond_graph_t::reconnect(type_, fragments, curr0, curr1);
  }
  // current interface
  template<class T>
  boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
  reconnect_bond(std::vector<T>& fragments, int fid, int curr0, int curr1) const {
    return bond_graph_t::reconnect(type_, fragments, fid, curr0, curr1);
  }
  // Map (loop0, loop1) returned by reconnect to lower/upper loop labels.
  // NOTE(review): t == 0 || t == 5 appears to single out the graphs whose
  // reconnect closes the incoming pair (g = 0, and g = 5 for xyz) — confirm
  // against the graph diagrams above.
  static int loop_l(int /* t */, int loop0, int /* loop1 */) { return loop0; }
  static int loop_u(int /* t */, int /* loop0 */, int loop1) { return loop1; }
  static int loop_l0(int /* t */, int loop0, int /* loop1 */) { return loop0; }
  static int loop_l1(int t, int loop0, int loop1) { return (t ==0 || t == 5) ? loop0 : loop1; }
  static int loop_u0(int /* t */, int /* loop0 */, int loop1) { return loop1; }
  static int loop_u1(int t, int loop0, int loop1) { return (t ==0 || t == 5) ? loop1 : loop0; }
private:
  int type_;      // graph type id, interpreted by site_graph_t / bond_graph_t
  location_t loc_; // which site or bond the operator acts on
};
// Free-function forwarders: allow generic cluster code to operate on a
// local_graph without naming its member functions directly.
template<typename SITE, typename BOND, typename LOC>
inline int type(const local_graph<SITE, BOND, LOC>& g) { return g.type(); }
template<typename SITE, typename BOND, typename LOC>
inline bool is_compatible(const local_graph<SITE, BOND, LOC>& g, int c) {
  return g.is_compatible(c);
}
template<typename SITE, typename BOND, typename LOC>
inline bool is_compatible(const local_graph<SITE, BOND, LOC>& g, int c0, int c1) {
  return g.is_compatible(c0, c1);
}
template<typename SITE, typename BOND, typename LOC>
inline int pos(const local_graph<SITE, BOND, LOC>& g) { return g.pos(); }
template<typename SITE, typename BOND, typename LOC>
inline bool is_site(const local_graph<SITE, BOND, LOC>& g) { return g.is_site(); }
template<typename SITE, typename BOND, typename LOC>
inline bool is_bond(const local_graph<SITE, BOND, LOC>& g) { return g.is_bond(); }
// obsolete interface (site)
template<typename T, typename SITE, typename BOND, typename LOC>
boost::tuple<int /* curr */, int /* loop0 */, int /* loop1 */>
reconnect(std::vector<T>& fragments, const local_graph<SITE, BOND, LOC>& g,
  int curr) {
  return g.reconnect(fragments, curr);
}
// current interface (site)
template<typename T, typename SITE, typename BOND, typename LOC>
boost::tuple<int /* fid */, int /* curr */, int /* loop0 */, int /* loop1 */>
reconnect(std::vector<T>& fragments, int fid, const local_graph<SITE, BOND, LOC>& g,
  int curr) {
  return g.reconnect_site(fragments, fid, curr);
}
// obsolete interface (bond)
template<typename T, typename SITE, typename BOND, typename LOC>
boost::tuple<int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
reconnect(std::vector<T>& fragments, const local_graph<SITE, BOND, LOC>& g,
  int curr0, int curr1) {
  return g.reconnect(fragments, curr0, curr1);
}
// current interface (bond)
template<typename T, typename SITE, typename BOND, typename LOC>
boost::tuple<int /* fid */, int /* curr0 */, int /* curr1 */, int /* loop0 */, int /* loop1 */>
reconnect(std::vector<T>& fragments, int fid, const local_graph<SITE, BOND, LOC>& g,
  int curr0, int curr1) {
  return g.reconnect_bond(fragments, fid, curr0, curr1);
}
//
// graph_chooser
//
// graph_chooser: builds per-(site|bond) tables of candidate local graphs
// weighted by the model's weight table, then samples graphs during the
// Monte Carlo update. One table per thread when OpenMP sharing is used.
template<typename LOCAL_GRAPH> class graph_chooser;

template<typename SITE, typename BOND, typename LOC>
class graph_chooser<local_graph<SITE, BOND, LOC> > {
public:
  typedef SITE site_graph_t;
  typedef BOND bond_graph_t;
  typedef LOC location_t;
  typedef local_graph<site_graph_t, bond_graph_t, location_t> local_graph_t;
  graph_chooser() {}
  template<typename WEIGHT_TABLE>
  graph_chooser(const WEIGHT_TABLE& wt, bool is_path_integral) {
    init(wt, is_path_integral);
  }
  template<class WEIGHT_TABLE, typename SHARING>
  graph_chooser(const WEIGHT_TABLE& wt, SHARING const& sharing, bool is_path_integral) {
    init(wt, sharing, is_path_integral);
  }
  // Serial initialization: everything lives in slot 0 of the per-thread arrays.
  template<typename WEIGHT_TABLE>
  void init(const WEIGHT_TABLE& wt, bool is_path_integral) {
    int num_threads = 1;
    dist_graph_.resize(num_threads);
    graph_.resize(num_threads);
    for (int i = 0; i < graph_.size(); ++i) graph_[i].resize(0);
    diag_.resize(0);
    offdiag_.resize(0);
    std::vector<double> w;
    weight_.resize(num_threads);
    for (int i = 0; i < graph_.size(); ++i) weight_[i] = 0;
    // Collect every site graph with nonzero weight.
    BOOST_FOREACH(typename WEIGHT_TABLE::site_weight_t const& sw, wt.site_weights()) {
      for (int g = 0; g < WEIGHT_TABLE::num_site_graphs; ++g) {
        if (is_nonzero<1>(sw.second.v[g])) {
          if (site_graph_t::is_valid_gid(g)) {
            graph_[0].push_back(local_graph_t(g, location_t::site_location(sw.first)));
            w.push_back(sw.second.v[g]);
            weight_[0] += sw.second.v[g];
          } else {
            // nonzero weight for a graph id the policy class rejects
            boost::throw_exception(std::runtime_error("graph_chooser::init"));
          }
        }
      }
    }
    // Collect every bond graph with nonzero weight, plus choice helpers.
    BOOST_FOREACH(typename WEIGHT_TABLE::bond_weight_t const& bw, wt.bond_weights()) {
      for (int g = 0; g < WEIGHT_TABLE::num_bond_graphs; ++g) {
        if (is_nonzero<1>(bw.second.v[g])) {
          if (bond_graph_t::is_valid_gid(g)) {
            graph_[0].push_back(local_graph_t(g, location_t::bond_location(bw.first)));
            w.push_back(bw.second.v[g]);
            weight_[0] += bw.second.v[g];
          } else {
            boost::throw_exception(std::runtime_error("graph_chooser::init"));
          }
        }
      }
      if (!is_path_integral && bond_graph_t::has_nontrivial_diagonal_choice_helper())
        diag_.push_back(typename bond_graph_t::diagonal_choice_helper(bw.second.v));
      if (bond_graph_t::has_nontrivial_offdiagonal_choice_helper())
        offdiag_.push_back(typename bond_graph_t::offdiagonal_choice_helper(bw.second.v));
    }
    if (w.size()) dist_graph_[0].init(w);
    // Guard against a zero total weight (division by zero downstream).
    if (is_zero<2>(weight_[0])) weight_[0] = 1.0e-20;
  }
#ifdef LOOPER_OPENMP
  // Parallel initialization: each thread builds the table for the bond
  // range assigned to it by SHARING. Site operators are not supported here.
  template<typename WEIGHT_TABLE, typename SHARING>
  void init(const WEIGHT_TABLE& wt, SHARING const& sharing, bool is_path_integral) {
    int num_threads = omp_get_max_threads();
    if (num_threads == 1) {
      init(wt, is_path_integral);
      return;
    }
    dist_graph_.resize(num_threads);
    graph_.resize(num_threads);
    diag_.resize(0);
    offdiag_.resize(0);
    weight_.resize(num_threads);
    std::vector<std::vector<double> > w(num_threads);
    // check for site operator
    BOOST_FOREACH(typename WEIGHT_TABLE::site_weight_t const& sw, wt.site_weights()) {
      for (int g = 0; g < WEIGHT_TABLE::num_site_graphs; ++g) {
        if (is_nonzero<1>(sw.second.v[g]))
          boost::throw_exception(std::runtime_error("graph_chooser::init() site operator is not supported in OpenMP parallelization"));
      }
    }
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      graph_[tid].resize(0);
      weight_[tid] = 0;
      // site weight is not supported
      //// BOOST_FOREACH(typename WEIGHT_TABLE::bond_weight_t const& bw, wt.bond_weights()) {
      typename WEIGHT_TABLE::bond_weight_iterator bwi, bwi_end;
      for (boost::tie(bwi, bwi_end) = wt.bond_weights(); bwi != bwi_end; ++bwi) {
        // only bonds inside this thread's contiguous range
        if (bwi->first >= sharing.bond_offset(tid) &&
            bwi->first < sharing.bond_offset(tid) + sharing.num_bonds_local(tid)) {
          for (int g = 0; g < WEIGHT_TABLE::num_bond_graphs; ++g) {
            if (is_nonzero<1>(bwi->second.v[g])) {
              if (bond_graph_t::is_valid_gid(g)) {
                graph_[tid].push_back(local_graph_t(g, location_t::bond_location(bwi->first)));
                w[tid].push_back(bwi->second.v[g]);
                weight_[tid] += bwi->second.v[g];
              } else {
                boost::throw_exception(std::runtime_error("graph_chooser::init"));
              }
            }
          }
        }
      }
      if (w[tid].size()) dist_graph_[tid].init(w[tid]);
      if (is_zero<2>(weight_[tid])) weight_[tid] = 1.0e-20;
    } // end omp parallel
    // choice helpers are shared by all threads, so they are built serially
    //// BOOST_FOREACH(typename WEIGHT_TABLE::bond_weight_t const& bw, wt.bond_weights()) {
    typename WEIGHT_TABLE::bond_weight_iterator bwi, bwi_end;
    for (boost::tie(bwi, bwi_end) = wt.bond_weights(); bwi != bwi_end; ++bwi) {
      if (!is_path_integral && bond_graph_t::has_nontrivial_diagonal_choice_helper())
        diag_.push_back(typename bond_graph_t::diagonal_choice_helper(bwi->second.v));
      if (bond_graph_t::has_nontrivial_offdiagonal_choice_helper())
        offdiag_.push_back(typename bond_graph_t::offdiagonal_choice_helper(bwi->second.v));
    }
  }
#endif
  // Sample a local graph proportional to its weight (serial / per-thread).
  template<typename RNG>
  const local_graph_t& graph(RNG& rng) const { return graph_[0][dist_graph_[0](rng)]; }
  template<typename RNG>
  const local_graph_t& graph(RNG& rng, int tid) const { return graph_[tid][dist_graph_[tid](rng)]; }
  // Diagonal graph for a site operator: always graph 0.
  template<typename RNG>
  local_graph_t diagonal(RNG& /* rng */, const location_t& loc, int /* c */) const {
    return local_graph_t(0, loc);
  }
  // Diagonal / off-diagonal graph choice for a bond operator.
  template<typename RNG>
  local_graph_t diagonal(RNG& rng, const location_t& loc, int c0, int c1) const {
    return local_graph_t(bond_graph_t::choose_diagonal(rng, diag_, pos(loc), c0, c1), loc);
  }
  template<typename RNG>
  local_graph_t offdiagonal(RNG& rng, const location_t& loc, int c0, int c1) const {
    return local_graph_t(bond_graph_t::choose_offdiagonal(rng, offdiag_, pos(loc), c0, c1), loc);
  }
  // Total graph weight (serial slot 0 / per-thread).
  double weight() const { return weight_[0]; }
  double weight(int tid) const { return weight_[tid]; }
private:
  std::vector<alps::random_choice<double> > dist_graph_; // per-thread samplers
  std::vector<double> weight_;                           // per-thread total weights
  std::vector<std::vector<local_graph_t> > graph_;       // per-thread graph tables
  std::vector<typename bond_graph_t::diagonal_choice_helper> diag_;       // indexed by bond
  std::vector<typename bond_graph_t::offdiagonal_choice_helper> offdiag_; // indexed by bond
};
} // end namespace looper
#endif // LOOPER_GRAPH_IMPL_H
|
r_ao2mo.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <math.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"
#include "vhf/cvhf.h"
#include "vhf/fblas.h"
#include "vhf/nr_direct.h"
#include "r_ao2mo.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#define NCTRMAX 128
/*
* s1-AO integrals to s1-MO integrals, efficient for i_count < j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*nao]
* s1, s2 here to label the AO symmetry
*/
int AO2MOmmm_r_iltj(double complex *vout, double complex *eri,
                    struct _AO2MOEnvs *envs, int seekdim)
{
        /* Query mode: report the output (case 1) or input (case 2) block
         * size instead of performing the transformation. */
        switch (seekdim) {
        case 1: return envs->bra_count * envs->ket_count;
        case 2: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int n2c = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        int i;
        double *buf1 = malloc(sizeof(double)*n2c*i_count*3);
        assert(buf1);  /* consistent with the allocation checks in fill_s1/fill_s2 */
        double *buf2 = buf1 + n2c*i_count;
        double *buf3 = buf2 + n2c*i_count;
        double *bufr, *bufi;
        double *mo1 = malloc(sizeof(double) * n2c*MAX(i_count,j_count)*2);
        assert(mo1);
        double *mo2, *mo_r, *mo_i;
        double *eri_r = malloc(sizeof(double) * n2c*n2c*3);
        assert(eri_r);
        double *eri_i = eri_r + n2c*n2c;
        double *eri1 = eri_i + n2c*n2c;
        double *vout1, *vout2, *vout3;
// Gauss complex multiplication, C_pi^* (pq| = (iq|, where (pq| is in C-order
        mo_r = envs->mo_r + i_start * n2c;
        mo_i = envs->mo_i + i_start * n2c;
        mo2 = mo1 + n2c*i_count;
        /* precombine real/imag parts so the product needs 3 real GEMMs */
        for (i = 0; i < n2c*i_count; i++) {
                mo1[i] = mo_r[i] - mo_i[i];
                mo2[i] =-mo_i[i] - mo_r[i];
        }
        for (i = 0; i < n2c*n2c; i++) {
                eri_r[i] = creal(eri[i]);
                eri_i[i] = cimag(eri[i]);
                eri1 [i] = eri_r[i] + eri_i[i];
        }
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri1, &n2c, mo_r, &n2c, &D0, buf1, &n2c);
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri_r, &n2c, mo2, &n2c, &D0, buf2, &n2c);
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri_i, &n2c, mo1, &n2c, &D0, buf3, &n2c);
        free(eri_r);
// C_qj^* (iq| = (ij|
        /* bufr/bufi alias buf3/buf2, which are overwritten in place below */
        bufr = buf3;
        bufi = buf2;
        for (i = 0; i < n2c*i_count; i++) {
                buf3[i] = buf1[i] - buf3[i];
                buf2[i] = buf1[i] + buf2[i];
        }
        for (i = 0; i < n2c*i_count; i++) {
                buf1[i] = bufr[i] + bufi[i];
        }
        mo_r = envs->mo_r + j_start * n2c;
        mo_i = envs->mo_i + j_start * n2c;
        mo2 = mo1 + n2c*j_count;
        for (i = 0; i < n2c*j_count; i++) {
                mo1[i] = mo_r[i] + mo_i[i];
                mo2[i] = mo_i[i] - mo_r[i];
        }
        vout1 = malloc(sizeof(double)*i_count*j_count*3);
        assert(vout1);
        vout2 = vout1 + i_count * j_count;
        vout3 = vout2 + i_count * j_count;
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo_r, &n2c, buf1, &n2c, &D0, vout1, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo2, &n2c, bufr, &n2c, &D0, vout2, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo1, &n2c, bufi, &n2c, &D0, vout3, &j_count);
        /* reassemble the complex result from the three real products */
        for (i = 0; i < i_count*j_count; i++) {
                vout[i] = (vout1[i]-vout3[i]) + (vout1[i]+vout2[i])*_Complex_I;
        }
        free(vout1);
        free(buf1);
        free(mo1);
        return 0;
}
/* s1 alias of AO2MOmmm_r_iltj: no AO permutation symmetry is exploited */
int AO2MOmmm_r_s1_iltj(double complex *vout, double complex *eri,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        return AO2MOmmm_r_iltj(vout, eri, envs, seekdim);
}
/*
* s1-AO integrals to s1-MO integrals, efficient for i_count > j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*nao]
*/
int AO2MOmmm_r_igtj(double complex *vout, double complex *eri,
                    struct _AO2MOEnvs *envs, int seekdim)
{
        /* Query mode: report the output (case 1) or input (case 2) block
         * size instead of performing the transformation. */
        switch (seekdim) {
        case 1: return envs->bra_count * envs->ket_count;
        case 2: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int n2c = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        int i;
        double *buf1 = malloc(sizeof(double)*n2c*j_count*3);
        assert(buf1);  /* consistent with the allocation checks in fill_s1/fill_s2 */
        double *buf2 = buf1 + n2c*j_count;
        double *buf3 = buf2 + n2c*j_count;
        double *bufr, *bufi;
        double *mo1 = malloc(sizeof(double) * n2c*MAX(i_count,j_count)*2);
        assert(mo1);
        double *mo2, *mo_r, *mo_i;
        double *eri_r = malloc(sizeof(double) * n2c*n2c*3);
        assert(eri_r);
        double *eri_i = eri_r + n2c*n2c;
        double *eri1 = eri_i + n2c*n2c;
        double *vout1, *vout2, *vout3;
// Gauss complex multiplication, C_qj (pq| = (pj|, where (pq| is in C-order
        for (i = 0; i < n2c*n2c; i++) {
                eri_r[i] = creal(eri[i]);
                eri_i[i] = cimag(eri[i]);
                eri1 [i] = eri_r[i] + eri_i[i];
        }
        mo_r = envs->mo_r + j_start * n2c;
        mo_i = envs->mo_i + j_start * n2c;
        mo2 = mo1 + n2c*j_count;
        /* precombine real/imag parts so the product needs 3 real GEMMs */
        for (i = 0; i < n2c*j_count; i++) {
                mo1[i] = mo_r[i] + mo_i[i];
                mo2[i] = mo_i[i] - mo_r[i];
        }
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo_r, &n2c, eri1, &n2c, &D0, buf1, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo2, &n2c, eri_r, &n2c, &D0, buf2, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo1, &n2c, eri_i, &n2c, &D0, buf3, &j_count);
        free(eri_r);
        /* bufr/bufi alias buf3/buf2, which are overwritten in place below */
        bufr = buf3;
        bufi = buf2;
        for (i = 0; i < n2c*j_count; i++) {
                buf3[i] = buf1[i] - buf3[i];
                buf2[i] = buf1[i] + buf2[i];
        }
        for (i = 0; i < n2c*j_count; i++) {
                buf1[i] = bufr[i] + bufi[i];
        }
        mo_r = envs->mo_r + i_start * n2c;
        mo_i = envs->mo_i + i_start * n2c;
        mo2 = mo1 + n2c*i_count;
        for (i = 0; i < n2c*i_count; i++) {
                mo1[i] = mo_r[i] - mo_i[i];
                mo2[i] =-mo_i[i] - mo_r[i];
        }
        vout1 = malloc(sizeof(double)*i_count*j_count*3);
        assert(vout1);
        vout2 = vout1 + i_count * j_count;
        vout3 = vout2 + i_count * j_count;
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, buf1, &j_count, mo_r, &n2c, &D0, vout1, &j_count);
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, bufr, &j_count, mo2, &n2c, &D0, vout2, &j_count);
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, bufi, &j_count, mo1, &n2c, &D0, vout3, &j_count);
        /* reassemble the complex result from the three real products */
        for (i = 0; i < i_count*j_count; i++) {
                vout[i] = (vout1[i]-vout3[i]) + (vout1[i]+vout2[i])*_Complex_I;
        }
        free(vout1);
        free(buf1);
        free(mo1);
        return 0;
}
/* s1 alias of AO2MOmmm_r_igtj: no AO permutation symmetry is exploited */
int AO2MOmmm_r_s1_igtj(double complex *vout, double complex *eri,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        return AO2MOmmm_r_igtj(vout, eri, envs, seekdim);
}
/*
* s1, s2ij, s2kl, s4 here to label the AO symmetry
* eris[ncomp,nkl,nao*nao]
*/
/* Compute AO integral blocks (i j | k l) for shell ish against shells
 * jsh < jshtot and the kl-shell-pair range of this task, storing them
 * transposed into eri[icomp, kl, i*nao+j]. The kl index here decodes
 * rectangularly: kl = ksh * nbas + lsh (contrast with fill_s2). */
static void fill_s1(int (*intor)(), int (*fprescreen)(), double complex *eri,
                    int nkl, int ish, int jshtot, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * nao;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp, i, j, k, l, n;
        int shls[4];
        double complex *buf = malloc(sizeof(double complex)
                                     *di*nao*NCTRMAX*NCTRMAX*envs->ncomp);
        assert(buf);
        double complex *pbuf, *pbuf1, *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // rectangular decode: kl = ksh * nbas + lsh
                ksh = kl / envs->nbas;
                lsh = kl - ksh * envs->nbas;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                /* first pass: evaluate (or zero) all jsh blocks into buf */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        shls[1] = jsh;
                        n = di * dj * dk * dl * envs->ncomp;
                        if ((*fprescreen)(shls, envs->vhfopt,
                                          envs->atm, envs->bas, envs->env)) {
                                (*intor)(pbuf, NULL, shls, envs->atm, envs->natm,
                                         envs->bas, envs->nbas, envs->env,
                                         envs->cintopt, NULL);
                        } else {
                                memset(pbuf, 0, sizeof(double complex)*n);
                        }
                        pbuf += n;
                }
                /* second pass: scatter buf (Fortran-order i within block)
                 * into the C-order eri[i*nao+j] layout */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        for (icomp = 0; icomp < envs->ncomp; icomp++) {
                                peri = eri + nao2 * nkl * icomp
                                     + ao_loc[ish] * nao + ao_loc[jsh];
                                for (k = 0; k < dk; k++) {
                                for (l = 0; l < dl; l++) {
                                        pbuf1 = pbuf + di * dj * (l*dk+k);
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri[i*nao+j] = pbuf1[j*di+i];
                                        } }
                                        peri += nao2;
                                } }
                                pbuf += di * dj * dk * dl;
                        }
                }
                eri += nao2 * dk * dl;
        }
        free(buf);
}
/* Same as fill_s1 but the kl-pair range uses s2 (triangular) packing:
 * kl = ksh*(ksh+1)/2 + lsh, decoded via the inverse triangular formula. */
static void fill_s2(int (*intor)(), int (*fprescreen)(), double complex *eri,
                    int nkl, int ish, int jshtot, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * nao;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp, i, j, k, l, n;
        int shls[4];
        double complex *buf = malloc(sizeof(double complex)
                                     *di*nao*nkl*envs->ncomp);
        assert(buf);
        double complex *pbuf, *pbuf1, *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // kl = k * (k+1) / 2 + l
                // the 1e-7 guards against sqrt rounding just below an integer
                ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7);
                lsh = kl - ksh * (ksh+1) / 2;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                /* first pass: evaluate (or zero) all jsh blocks into buf */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        shls[1] = jsh;
                        n = di * dj * dk * dl * envs->ncomp;
                        if ((*fprescreen)(shls, envs->vhfopt,
                                          envs->atm, envs->bas, envs->env)) {
                                (*intor)(pbuf, NULL, shls, envs->atm, envs->natm,
                                         envs->bas, envs->nbas, envs->env,
                                         envs->cintopt, NULL);
                        } else {
                                memset(pbuf, 0, sizeof(double complex)*n);
                        }
                        pbuf += n;
                }
                /* second pass: scatter buf (Fortran-order i within block)
                 * into the C-order eri[i*nao+j] layout */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        for (icomp = 0; icomp < envs->ncomp; icomp++) {
                                peri = eri + nao2 * nkl * icomp
                                     + ao_loc[ish] * nao + ao_loc[jsh];
                                for (k = 0; k < dk; k++) {
                                for (l = 0; l < dl; l++) {
                                        pbuf1 = pbuf + di * dj * (l*dk+k);
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri[i*nao+j] = pbuf1[j*di+i];
                                        } }
                                        peri += nao2;
                                } }
                                pbuf += di * dj * dk * dl;
                        }
                }
                eri += nao2 * dk * dl;
        }
        free(buf);
}
/* s1: no AO permutation symmetry -- bra runs over all jsh in [0, nbas) */
void AO2MOfill_r_s1(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s1(intor, fprescreen, eri, nkl, ish, envs->nbas, envs);
}
/* s2ij: symmetry between i and j -- only jsh <= ish is generated */
void AO2MOfill_r_s2ij(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s1(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/* s2kl: symmetry between k and l -- triangular kl shell pairs (fill_s2) */
void AO2MOfill_r_s2kl(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s2(intor, fprescreen, eri, nkl, ish, envs->nbas, envs);
}
/* s4: symmetry in both ij and kl */
void AO2MOfill_r_s4(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s1(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/* a2ij: anti-symmetry in ij -- same fill pattern as s2ij */
void AO2MOfill_r_a2ij(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s1(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/* a2kl: anti-symmetry in kl -- same fill pattern as s2kl */
void AO2MOfill_r_a2kl(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s2(intor, fprescreen, eri, nkl, ish, envs->nbas, envs);
}
/* a4ij: anti-symmetric ij, symmetric kl */
void AO2MOfill_r_a4ij(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/* a4kl: symmetric ij, anti-symmetric kl */
void AO2MOfill_r_a4kl(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/* a4: anti-symmetric in both ij and kl */
void AO2MOfill_r_a4(int (*intor)(), int (*fprescreen)(),
double complex *eri, int nkl, int ish,
struct _AO2MOEnvs *envs)
{
fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs);
}
/*
* time reversal symmetry for AOs
* tao index start from 1
*/
/* Walk shell-like blocks delimited by the time-reversal pairing array tao:
 * I0/J0 start at Istart/Jstart and, after each block, jump to I1/J1 where
 * I1 = abs(tao[I0]).  tao indices are 1-based; negative entries flag a sign
 * change under time reversal.  The loop bodies may rely on I0..I1-1 and
 * J0..J1-1 as the current block bounds.  (Comments cannot be placed inside
 * the continuation lines of the macros themselves.) */
#define BeginTimeRevLoop(I, J) \
for (I##0 = I##start; I##0 < I##end;) { \
I##1 = abs(tao[I##0]); \
for (J##0 = J##start; J##0 < J##end;) { \
J##1 = abs(tao[J##0]);
/* Closes both loops opened by BeginTimeRevLoop and advances to the next
 * time-reversal block. */
#define EndTimeRevLoop(I, J) \
J##0 = J##1; } \
I##0 = I##1; }
/*
 * Reconstruct the upper-triangle shell blocks of the nao x nao matrix mat
 * from the stored lower-triangle blocks using time-reversal symmetry.
 * tao maps each AO to (the 1-based index of) its time-reversal partner;
 * the sign pattern applied depends on whether the two shells carry the
 * same sign in tao.
 */
static void timerev_mat(double complex *mat, int *tao, int *ao_loc, int nbas)
{
int nao = ao_loc[nbas];
int ish, jsh, istart, iend, jstart, jend;
int i, j, i0, j0, i1, j1;
double complex *pmat, *pmat1, *pbuf, *pbuf1;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < ish; jsh++) {
istart = ao_loc[ish ];
iend = ao_loc[ish+1];
jstart = ao_loc[jsh ];
jend = ao_loc[jsh+1];
/* same time-reversal sign for both shells */
if ((tao[jstart]<0) == (tao[istart]<0)) {
BeginTimeRevLoop(i, j);
/* destination: (j,i) block above the diagonal (j0 < i0) */
pbuf = mat + j0 * nao + i0;
pbuf1 = pbuf + nao;
/* source: far corner of the stored (i,j) block below the diagonal */
pmat = mat + (i1-1)*nao + (j1-1);
pmat1 = pmat - nao;
/* stride-2 walk over 2x2 sub-blocks -- presumably the AOs come in
 * Kramers pairs so block dimensions are even; TODO confirm */
for (j = 0; j < j1-j0; j+=2) {
for (i = 0; i < i1-i0; i+=2) {
pbuf [j*nao+i ] = pmat [-i*nao-j ];
pbuf1[j*nao+i ] =-pmat [-i*nao-j-1];
pbuf [j*nao+i+1] =-pmat1[-i*nao-j ];
pbuf1[j*nao+i+1] = pmat1[-i*nao-j-1];
} }
EndTimeRevLoop(i, j);
} else {
/* opposite time-reversal signs: flipped sign pattern */
BeginTimeRevLoop(i, j);
pbuf = mat + j0 * nao + i0;
pbuf1 = pbuf + nao;
pmat = mat + (i1-1)*nao + (j1-1);
pmat1 = pmat - nao;
for (j = 0; j < j1-j0; j+=2) {
for (i = 0; i < i1-i0; i+=2) {
pbuf [j*nao+i ] =-pmat [-i*nao-j ];
pbuf1[j*nao+i ] = pmat [-i*nao-j-1];
pbuf [j*nao+i+1] = pmat1[-i*nao-j ];
pbuf1[j*nao+i+1] =-pmat1[-i*nao-j-1];
} }
EndTimeRevLoop(i, j);
}
} }
}
/*
 * Anti-time-reversal counterpart of timerev_mat: same block traversal,
 * but with the two sign patterns swapped between the equal-sign and
 * opposite-sign shell cases.
 */
static void atimerev_mat(double complex *mat, int *tao, int *ao_loc, int nbas)
{
int nao = ao_loc[nbas];
int ish, jsh, istart, iend, jstart, jend;
int i, j, i0, j0, i1, j1;
double complex *pmat, *pmat1, *pbuf, *pbuf1;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < ish; jsh++) {
istart = ao_loc[ish ];
iend = ao_loc[ish+1];
jstart = ao_loc[jsh ];
jend = ao_loc[jsh+1];
/* same time-reversal sign for both shells */
if ((tao[jstart]<0) == (tao[istart]<0)) {
BeginTimeRevLoop(i, j);
pbuf = mat + j0 * nao + i0;
pbuf1 = pbuf + nao;
pmat = mat + (i1-1)*nao + (j1-1);
pmat1 = pmat - nao;
/* stride-2 over 2x2 sub-blocks; see timerev_mat for the layout */
for (j = 0; j < j1-j0; j+=2) {
for (i = 0; i < i1-i0; i+=2) {
pbuf [j*nao+i ] =-pmat [-i*nao-j ];
pbuf1[j*nao+i ] = pmat [-i*nao-j-1];
pbuf [j*nao+i+1] = pmat1[-i*nao-j ];
pbuf1[j*nao+i+1] =-pmat1[-i*nao-j-1];
} }
EndTimeRevLoop(i, j);
} else {
/* opposite time-reversal signs */
BeginTimeRevLoop(i, j);
pbuf = mat + j0 * nao + i0;
pbuf1 = pbuf + nao;
pmat = mat + (i1-1)*nao + (j1-1);
pmat1 = pmat - nao;
for (j = 0; j < j1-j0; j+=2) {
for (i = 0; i < i1-i0; i+=2) {
pbuf [j*nao+i ] = pmat [-i*nao-j ];
pbuf1[j*nao+i ] =-pmat [-i*nao-j-1];
pbuf [j*nao+i+1] =-pmat1[-i*nao-j ];
pbuf1[j*nao+i+1] = pmat1[-i*nao-j-1];
} }
EndTimeRevLoop(i, j);
}
} }
}
/*
 * Copy the block-lower-triangular part of mat into buf: for every shell,
 * each of its rows is copied from column 0 up to (but excluding) the end
 * of that shell -- i.e. the lower triangle plus the full diagonal shell
 * block.  Entries above that prefix are left untouched in buf (they are
 * reconstructed later by timerev_mat / atimerev_mat).
 */
static void copy_mat(double complex *buf, double complex *mat,
int *ao_loc, int nbas)
{
        const int nao = ao_loc[nbas];
        int ish, row;
        for (ish = 0; ish < nbas; ish++) {
                const int row0 = ao_loc[ish];
                const int row1 = ao_loc[ish+1];
                /* each row of this shell keeps its first row1 columns */
                for (row = row0; row < row1; row++) {
                        memcpy(buf + (size_t)row * nao,
                               mat + (size_t)row * nao,
                               sizeof(double complex) * row1);
                }
        }
}
/*
* ************************************************
* s1, s2ij, s2kl, s4 here to label the AO symmetry
*/
/*
 * First half-transformation of one nao x nao AO matrix (row row_id of vin)
 * with no ij symmetry.  fmmm called with (NULL, NULL, envs, 1) reports the
 * size of one transformed row; called with seekdim 0 it performs the
 * transformation.
 */
void AO2MOtranse1_r_s1(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
        size_t row_size = fmmm(NULL, NULL, envs, 1);
        size_t nao2 = envs->nao * envs->nao;
        double complex *dst = vout + row_size * row_id;
        double complex *src = vin + nao2 * row_id;
        fmmm(dst, src, envs, 0);
}
/*
 * First half-transformation with hermitian ij symmetry: only the
 * block-lower triangle of vin is valid, so work on a scratch copy whose
 * upper triangle is restored by time reversal before calling fmmm.
 */
void AO2MOtranse1_r_s2ij(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
        int nao = envs->nao;
        size_t row_size = fmmm(NULL, NULL, envs, 1);
        size_t nao2 = nao * nao;
        double complex *scratch = malloc(sizeof(double complex) * nao*nao);
        copy_mat(scratch, vin + nao2 * row_id, envs->ao_loc, envs->nbas);
        timerev_mat(scratch, envs->tao, envs->ao_loc, envs->nbas);
        fmmm(vout + row_size * row_id, scratch, envs, 0);
        free(scratch);
}
/* s2kl: kl symmetry does not affect the ij half-transformation -> s1 path */
void AO2MOtranse1_r_s2kl(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs);
}
/* s4: ij symmetry handled exactly as s2ij */
void AO2MOtranse1_r_s4(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_s2ij(fmmm, vout, vin, row_id, envs);
}
/*
 * First half-transformation with anti-hermitian ij symmetry: like the s2ij
 * variant but the upper triangle of the scratch copy is restored with
 * atimerev_mat (anti-time-reversal signs).
 */
void AO2MOtranse1_r_a2ij(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
        int nao = envs->nao;
        size_t row_size = fmmm(NULL, NULL, envs, 1);
        size_t nao2 = nao * nao;
        double complex *scratch = malloc(sizeof(double complex) * nao*nao);
        copy_mat(scratch, vin + nao2 * row_id, envs->ao_loc, envs->nbas);
        atimerev_mat(scratch, envs->tao, envs->ao_loc, envs->nbas);
        fmmm(vout + row_size * row_id, scratch, envs, 0);
        free(scratch);
}
/* a2kl: kl anti-symmetry does not affect the ij half-transformation */
void AO2MOtranse1_r_a2kl(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs);
}
// anti-time-reversal between ij and time-reversal between kl
void AO2MOtranse1_r_a4ij(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_a2ij(fmmm, vout, vin, row_id, envs);
}
// time-reversal between ij and anti-time-reversal between kl
void AO2MOtranse1_r_a4kl(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_s2ij(fmmm, vout, vin, row_id, envs);
}
// anti-time-reversal between ij and anti-time-reversal between kl
void AO2MOtranse1_r_a4(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_a2ij(fmmm, vout, vin, row_id, envs);
}
/* second half-transformation, s1: same mechanics as the e1 s1 path */
void AO2MOtranse2_r_s1(int (*fmmm)(),
double complex *vout, double complex *vin, int row_id,
struct _AO2MOEnvs *envs)
{
AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs);
}
/*
* ************************************************
* sort (shell-based) integral blocks then transform
*/
/*
 * Gather the shell-blocked integrals of row row_id (as produced by the
 * fill functions: consecutive di x dj blocks) into a dense row-major
 * nao x nao matrix, then apply the second half-transformation fmmm.
 */
void AO2MOsortranse2_r_s1(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
int nao = envs->nao;
int *ao_loc = envs->ao_loc;
size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1);
size_t nao2 = envs->nao * envs->nao;
int ish, jsh, di, dj;
int i, j;
double complex *buf = malloc(sizeof(double complex) * nao2);
double complex *pbuf;
vin += nao2 * row_id;
for (ish = 0; ish < envs->nbas; ish++) {
di = ao_loc[ish+1] - ao_loc[ish];
for (jsh = 0; jsh < envs->nbas; jsh++) {
dj = ao_loc[jsh+1] - ao_loc[jsh];
/* destination corner of shell block (ish, jsh) */
pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh];
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pbuf[i*nao+j] = vin[i*dj+j];
} }
vin += di * dj;
}
}
(*fmmm)(vout+ij_pair*row_id, buf, envs, 0);
free(buf);
}
/* s2ij: ij symmetry does not change the kl-side sort/transform -> s1 path */
void AO2MOsortranse2_r_s2ij(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_s1(fmmm, vout, vin, row_id, envs);
}
/*
 * Like AO2MOsortranse2_r_s1 but vin stores only the shell-blocked lower
 * triangle (jsh <= ish, with full diagonal shell blocks).  The packed row
 * length is recomputed below, the blocks are scattered into the lower part
 * of buf, and the upper part is restored by time reversal before the
 * transformation.
 */
void AO2MOsortranse2_r_s2kl(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
int nao = envs->nao;
int *ao_loc = envs->ao_loc;
size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1);
size_t nao2 = 0;
int ish, jsh, di, dj;
int i, j;
double complex *buf = malloc(sizeof(double complex) * nao * nao);
double complex *pbuf;
/* packed size of one row: strict lower triangle plus full diagonal
 * shell blocks (hence the extra di*(di-1)/2 per shell) */
nao2 = nao * (nao+1) / 2;
for (ish = 0; ish < envs->nbas; ish++) {
di = ao_loc[ish+1] - ao_loc[ish];
nao2 += di * (di-1) / 2; // upper triangle for diagonal shells
}
vin += nao2 * row_id;
for (ish = 0; ish < envs->nbas; ish++) {
di = ao_loc[ish+1] - ao_loc[ish];
for (jsh = 0; jsh <= ish; jsh++) {
dj = ao_loc[jsh+1] - ao_loc[jsh];
pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh];
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pbuf[i*nao+j] = vin[i*dj+j];
} }
vin += di * dj;
}
}
timerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas);
(*fmmm)(vout+ij_pair*row_id, buf, envs, 0);
free(buf);
}
/* s4: kl side behaves as s2kl */
void AO2MOsortranse2_r_s4(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_s2kl(fmmm, vout, vin, row_id, envs);
}
/* a2ij: ij anti-symmetry irrelevant on the kl side -> s1 path */
void AO2MOsortranse2_r_a2ij(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_s1(fmmm, vout, vin, row_id, envs);
}
/*
 * Anti-time-reversal counterpart of AO2MOsortranse2_r_s2kl: identical
 * unpacking of the block-triangular row, but the upper part of buf is
 * restored with atimerev_mat.
 */
void AO2MOsortranse2_r_a2kl(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
int nao = envs->nao;
int *ao_loc = envs->ao_loc;
size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1);
size_t nao2 = 0;
int ish, jsh, di, dj;
int i, j;
double complex *buf = malloc(sizeof(double complex) * nao * nao);
double complex *pbuf;
/* packed row length; see AO2MOsortranse2_r_s2kl */
nao2 = nao * (nao+1) / 2;
for (ish = 0; ish < envs->nbas; ish++) {
di = ao_loc[ish+1] - ao_loc[ish];
nao2 += di * (di-1) / 2; // upper triangle for diagonal shells
}
vin += nao2 * row_id;
for (ish = 0; ish < envs->nbas; ish++) {
di = ao_loc[ish+1] - ao_loc[ish];
for (jsh = 0; jsh <= ish; jsh++) {
dj = ao_loc[jsh+1] - ao_loc[jsh];
pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh];
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pbuf[i*nao+j] = vin[i*dj+j];
} }
vin += di * dj;
}
}
atimerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas);
(*fmmm)(vout+ij_pair*row_id, buf, envs, 0);
free(buf);
}
// anti-time-reversal between ij and time-reversal between kl
void AO2MOsortranse2_r_a4ij(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_s2kl(fmmm, vout, vin, row_id, envs);
}
// time-reversal between ij and anti-time-reversal between kl
void AO2MOsortranse2_r_a4kl(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_a2kl(fmmm, vout, vin, row_id, envs);
}
// anti-time-reversal between ij and anti-time-reversal between kl
void AO2MOsortranse2_r_a4(int (*fmmm)(),
double complex *vout, double complex *vin,
int row_id, struct _AO2MOEnvs *envs)
{
AO2MOsortranse2_r_a2kl(fmmm, vout, vin, row_id, envs);
}
/*
* Kramers pair should not be assumed
*/
/*
 * Driver for the first half AO->MO transformation (relativistic case;
 * Kramers pairing is not assumed).
 *
 * Step 1 fills the AO integral strips (one task per bra shell ish) into
 * eri_ao; step 2 half-transforms each of the nkl*ncomp rows into eri.
 * mo_coeff is split into real/imaginary parts once, up front, for the
 * transformation kernels.
 *
 * Fix: the fill callback was invoked with a stray 7th argument
 * ((*fill)(..., &envs, 0)) while every AO2MOfill_r_* function in this file
 * takes exactly 6 parameters; calling through the unprototyped pointer with
 * a mismatched argument count is undefined behavior.  The extra 0 is
 * removed.  mo_r/mo_i are now also assert-checked, consistent with eri_ao.
 */
void AO2MOr_e1_drv(int (*intor)(), void (*fill)(),
                   void (*ftrans)(), int (*fmmm)(),
                   double complex *eri, double complex *mo_coeff,
                   int klsh_start, int klsh_count, int nkl, int ncomp,
                   int *orbs_slice, int *tao, int *ao_loc,
                   CINTOpt *cintopt, CVHFOpt *vhfopt,
                   int *atm, int natm, int *bas, int nbas, double *env)
{
        const int i_start = orbs_slice[0];
        const int i_count = orbs_slice[1] - orbs_slice[0];
        const int j_start = orbs_slice[2];
        const int j_count = orbs_slice[3] - orbs_slice[2];
        int nao = ao_loc[nbas];
        int nmo = MAX(orbs_slice[1], orbs_slice[3]);
        int i;
        /* split the complex MO coefficients into real/imaginary arrays */
        double *mo_r = malloc(sizeof(double) * nao * nmo);
        double *mo_i = malloc(sizeof(double) * nao * nmo);
        assert(mo_r);
        assert(mo_i);
        for (i = 0; i < nao*nmo; i++) {
                mo_r[i] = creal(mo_coeff[i]);
                mo_i[i] = cimag(mo_coeff[i]);
        }
        struct _AO2MOEnvs envs = {natm, nbas, atm, bas, env, nao,
                                  klsh_start, klsh_count,
                                  i_start, i_count, j_start, j_count,
                                  ncomp, tao, ao_loc, mo_coeff,
                                  mo_r, mo_i, cintopt, vhfopt};
        double complex *eri_ao = malloc(sizeof(double complex)
                                        * nao*nao*nkl*ncomp);
        assert(eri_ao);
        int ish, kl;
        int (*fprescreen)();
        if (vhfopt) {
                fprescreen = vhfopt->fprescreen;
        } else {
                fprescreen = CVHFnoscreen;
        }
        /* step 1: fill AO integral strips, one bra shell per task */
#pragma omp parallel default(none) \
        shared(fill, fprescreen, eri_ao, envs, intor, nkl, nbas) \
        private(ish)
#pragma omp for nowait schedule(dynamic)
        for (ish = 0; ish < nbas; ish++) {
                (*fill)(intor, fprescreen, eri_ao, nkl, ish, &envs);
        }
        /* step 2: half-transform each (kl, component) row */
#pragma omp parallel default(none) \
        shared(ftrans, fmmm, eri, eri_ao, nkl, ncomp, envs) \
        private(kl)
#pragma omp for nowait schedule(static)
        for (kl = 0; kl < nkl*ncomp; kl++) {
                (*ftrans)(fmmm, eri, eri_ao, kl, &envs);
        }
        free(eri_ao);
        free(mo_r);
        free(mo_i);
}
/*
 * Driver for the second half transformation: each of the nijcount rows of
 * vin is transformed by ftrans/fmmm into vout.  Only the envs fields read
 * by the kl-side transformers are initialized here (bra/ket slices, nao,
 * nbas, tao, ao_loc, mo arrays); the remaining fields (e.g. integral
 * machinery) are left uninitialized on purpose -- presumably ftrans/fmmm
 * never touch them (TODO confirm against _AO2MOEnvs users).
 */
void AO2MOr_e2_drv(void (*ftrans)(), int (*fmmm)(),
double complex *vout, double complex *vin,
double complex *mo_coeff,
int nijcount, int nao,
int *orbs_slice, int *tao, int *ao_loc, int nbas)
{
int nmo = MAX(orbs_slice[1], orbs_slice[3]);
int i;
/* split the complex MO coefficients into real/imaginary arrays */
double *mo_r = malloc(sizeof(double) * nao * nmo);
double *mo_i = malloc(sizeof(double) * nao * nmo);
for (i = 0; i < nao*nmo; i++) {
mo_r[i] = creal(mo_coeff[i]);
mo_i[i] = cimag(mo_coeff[i]);
}
struct _AO2MOEnvs envs;
envs.bra_start = orbs_slice[0];
envs.bra_count = orbs_slice[1] - orbs_slice[0];
envs.ket_start = orbs_slice[2];
envs.ket_count = orbs_slice[3] - orbs_slice[2];
envs.nao = nao;
envs.nbas = nbas;
envs.tao = tao;
envs.ao_loc = ao_loc;
envs.mo_coeff = mo_coeff;
envs.mo_r = mo_r;
envs.mo_i = mo_i;
#pragma omp parallel default(none) \
shared(ftrans, fmmm, vout, vin, nijcount, envs) \
private(i)
#pragma omp for nowait schedule(static)
for (i = 0; i < nijcount; i++) {
(*ftrans)(fmmm, vout, vin, i, &envs);
}
free(mo_r);
free(mo_i);
}
|
chunks.c | /*************************************************************************
* chunks.c -
*
* $Id$
*
* Copyright (c) INRIA 2012
*
* AUTHOR:
* Gregoire Malandain (gregoire.malandain@inria.fr)
*
* CREATION DATE:
* Fri Nov 16 22:45:44 CET 2012
*
*
* ADDITIONS, CHANGES
*
*
*
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef MAC
#include <sys/types.h>
#include <sys/sysctl.h>
#else
#include <unistd.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include <vtmalloc.h>
#include <chunks.h>
/* debug / verbose variables
*/
static int _verbose_ = 1;
static int _debug_ = 0;
/* set the module verbosity level */
void setVerboseInChunks( int v )
{
_verbose_ = v;
}
/* increase verbosity by one */
void incrementVerboseInChunks( )
{
_verbose_ ++;
}
/* decrease verbosity by one, clamped at 0 */
void decrementVerboseInChunks( )
{
_verbose_ --;
if ( _verbose_ < 0 ) _verbose_ = 0;
}
/* set the module debug level */
void setDebugInChunks( int v )
{
_debug_ = v;
}
/* increase debug level by one */
void incrementDebugInChunks( )
{
_debug_ ++;
}
/* decrease debug level by one, clamped at 0 */
void decrementDebugInChunks( )
{
_debug_ --;
if ( _debug_ < 0 ) _debug_ = 0;
}
/* parallelism, scheduling
*/
static parallelismType _parallelism_ = _DEFAULT_PARALLELISM_;
static ompSchedulingType _omp_scheduling_ = _DYNAMIC_OMP_SCHEDULING_;
/* select the parallelism backend (none / openmp / pthread / default) */
void setParallelism( parallelismType p )
{
_parallelism_ = p;
}
/* current parallelism backend */
parallelismType getParallelism( )
{
return( _parallelism_ );
}
/* select the OpenMP loop scheduling used by _ompProcessChunks */
void setOmpScheduling( ompSchedulingType s )
{
_omp_scheduling_ = s;
}
/* current OpenMP scheduling */
ompSchedulingType getOmpScheduling( )
{
return( _omp_scheduling_ );
}
/* chunk related variable
*/
static int _max_chunks_ = -1;
static int _min_elements_ = 100;
/* set the maximal number of chunks (<=0 means "decide automatically",
 * see _setMaxChunks) */
void setMaxChunks( int c )
{
_max_chunks_ = c;
}
/* current maximal number of chunks */
int getMaxChunks( )
{
return( _max_chunks_ );
}
/* set the minimal number of elements a chunk should contain */
void setMinElementsInChunks( int e )
{
_min_elements_ = e;
}
/* current minimal number of elements per chunk */
int getMinElementsInChunks( )
{
return( _min_elements_);
}
/************************************************************
*
* chunks processing
*
************************************************************/
#ifdef _OPENMP
/*
 * Process all chunks with OpenMP.  The loop body is duplicated per case
 * because the schedule() clause of '#pragma omp parallel for' must be a
 * literal in the pragma and cannot be selected at run time.
 * Returns 1 on success, -1 if any chunk callback left data[n].ret != 1.
 */
static int _ompProcessChunks( _chunk_callfunction ftn, typeChunks *chunks, char *from )
{
int n;
if ( chunks->n_allocated_chunks > 1 ) {
switch( chunks->ompScheduling ) {
default :
case _DEFAULT_OMP_SCHEDULING_ :
/* compiler/runtime default scheduling */
if ( _debug_ >= 3 ) fprintf( stderr, "%s: default openmp scheduling\n", from );
#pragma omp parallel for
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >=2 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " attributed to thread #%d", omp_get_thread_num() );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
break;
case _DYNAMIC_OMP_SCHEDULING_ :
if ( _debug_ >= 3 ) fprintf( stderr, "%s: dynamic openmp scheduling\n", from );
#pragma omp parallel for schedule(dynamic)
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >=2 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " attributed to thread #%d", omp_get_thread_num() );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
break;
case _DYNAMIC_ONE_OMP_SCHEDULING_ :
if ( _debug_ >= 3 ) fprintf( stderr, "%s: (dynamic,1) openmp scheduling\n", from );
#pragma omp parallel for schedule(dynamic,1)
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >=2 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " attributed to thread #%d", omp_get_thread_num() );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
break;
case _GUIDED_OMP_SCHEDULING_ :
if ( _debug_ >= 3 ) fprintf( stderr, "%s: (guided) openmp scheduling\n", from );
#pragma omp parallel for schedule(guided)
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >=2 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " attributed to thread #%d", omp_get_thread_num() );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
break;
case _STATIC_OMP_SCHEDULING_ :
if ( _debug_ >= 3 ) fprintf( stderr, "%s: (static) openmp scheduling\n", from );
#pragma omp parallel for schedule(static)
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >=2 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " attributed to thread #%d", omp_get_thread_num() );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
break;
}
}
else {
/* single chunk: no need to spawn a parallel region */
if ( _debug_ >= 3 )
fprintf( stderr, "%s: sequential loop\n", from );
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( _debug_ >= 4 ) {
fprintf( stderr, "%s: processing chunk #%d/%d", from, n+1, chunks->n_allocated_chunks );
fprintf( stderr, " in a sequential way" );
fprintf( stderr, "\n" );
}
(void)(*ftn)( &(chunks->data[n]) );
}
}
/* check the returned values in case of error
*/
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
if ( chunks->data[n].ret != 1 ) {
if ( _verbose_ ) {
fprintf( stderr, "%s: error when computing chunk #%d\n", from, n );
}
return( -1 );
}
}
return( 1 );
}
#endif
/*
 * Process all chunks with one pthread per chunk (created joinable, then
 * all joined).  Returns 1 on success, -1 on thread-API or allocation error.
 * NOTE(review): if pthread_create fails for chunk n, the n threads already
 * started are neither joined nor cancelled before returning -- presumably
 * acceptable because callers treat -1 as fatal; confirm before reuse.
 */
static int _pthreadProcessChunks( _chunk_callfunction ftn, typeChunks *chunks, char *from )
{
char *proc = "_pthreadProcessChunks";
int n;
int rc;
void *status;
pthread_t *thread;
pthread_attr_t attr;
if ( _debug_ >= 3 ) fprintf( stderr, "%s: pthread scheduling\n", from );
thread = (pthread_t*)vtmalloc( chunks->n_allocated_chunks * sizeof( pthread_t ), "thread", proc );
if ( thread == (pthread_t*)NULL ) {
if ( _verbose_ )
fprintf( stderr, "%s: unable to allocate array of %d threads\n", proc, chunks->n_allocated_chunks );
return( -1 );
}
pthread_attr_init( &attr );
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
/* one thread per chunk; ftn must match the pthread start-routine ABI */
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
rc = pthread_create( &thread[n], &attr, ftn, &(chunks->data[n]) );
if ( rc ) {
if ( _verbose_ )
fprintf( stderr, "%s: error when creating threads #%d/%d (returned code = %d)\n", proc, n, chunks->n_allocated_chunks, rc );
vtfree( thread );
return( -1 );
}
}
pthread_attr_destroy(&attr);
/* wait for completion; per-chunk status is checked by the caller via
 * chunks->data[n].ret, not via the pthread exit status */
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
rc = pthread_join( thread[n], &status );
if ( rc ) {
if ( _verbose_ )
fprintf( stderr, "%s: error when joining threads #%d/%d (returned code = %d)\n", proc, n, chunks->n_allocated_chunks, rc );
vtfree( thread );
return( -1 );
}
}
vtfree( thread );
return( 1 );
}
/*
 * Dispatch chunk processing according to the configured parallelism.
 * The switch relies on deliberate fallthrough: _DEFAULT_PARALLELISM_ falls
 * into the OpenMP case when compiled with _OPENMP, otherwise into the
 * pthread case.  Returns 1 on success, -1 on error.
 */
int processChunks( _chunk_callfunction ftn, typeChunks *chunks, char *from )
{
char *proc = "processChunks";
int n;
switch( _parallelism_ ) {
case _NO_PARALLELISM_ :
if ( _debug_ >= 3 )
fprintf( stderr, "%s: _NO_PARALLELISM_ case\n", proc );
for ( n=0; n<chunks->n_allocated_chunks; n++ ) {
(void)(*ftn)( &(chunks->data[n]) );
}
break;
default :
case _DEFAULT_PARALLELISM_ :
if ( _debug_ >= 3 )
fprintf( stderr, "%s: _DEFAULT_PARALLELISM_ case\n", proc );
/* fallthrough */
#ifdef _OPENMP
case _OMP_PARALLELISM_ :
if ( _debug_ >= 3 )
fprintf( stderr, "%s: _OMP_PARALLELISM_ case\n", proc );
return ( _ompProcessChunks( ftn, chunks, from ) );
break;
#endif
case _PTHREAD_PARALLELISM_ :
if ( _debug_ >= 3 )
fprintf( stderr, "%s: _PTHREAD_PARALLELISM_ case\n", proc );
return ( _pthreadProcessChunks( ftn, chunks, from ) );
break;
}
return( 1 );
}
/************************************************************
*
* chunks construction (ad hoc function)
*
************************************************************/
#ifdef MAC
/*
 * macOS variant: query the CPU count through several sysctl keys and
 * return the largest value found (at least 1).  For the no-parallelism
 * case 1 is returned; for OpenMP the OpenMP runtime is authoritative.
 * NOTE(review): sysctlnametomib overwrites lmib with the actual name
 * length; subsequent calls reuse it as an in/out value -- works for the
 * two-integer "hw.*" names used here, confirm before adding other keys.
 */
static int _getNCPU()
{
int n=1;
int mib[4];
size_t lmib = 4;
int ncpu;
size_t lncpu = sizeof( ncpu );
int nactivecpu;
size_t lnactivecpu = sizeof( nactivecpu );
int nphysicalcpu;
size_t lnphysicalcpu = sizeof( nphysicalcpu );
int nphysicalcpumax;
size_t lnphysicalcpumax = sizeof( nphysicalcpumax );
int nlogicalcpu;
size_t lnlogicalcpu = sizeof( nlogicalcpu );
int nlogicalcpumax;
size_t lnlogicalcpumax = sizeof( nlogicalcpumax );
int navailablecpu;
size_t lnavailablecpu = sizeof( ncpu );
switch( _parallelism_ ) {
case _NO_PARALLELISM_ :
return( 1 );
default :
case _DEFAULT_PARALLELISM_ :
#ifdef _OPENMP
case _OMP_PARALLELISM_ :
return( omp_get_max_threads() );
#endif
case _PTHREAD_PARALLELISM_ :
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
sysctl( mib, 2, &ncpu, &lncpu, NULL, 0);
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU;
sysctl( mib, 2, &navailablecpu, &lnavailablecpu, NULL, 0);
sysctlnametomib( "hw.ncpu", mib, &lmib );
sysctl( mib, 2, &ncpu, &lncpu, NULL, 0);
sysctlnametomib( "hw.activecpu", mib, &lmib );
sysctl( mib, 2, &nactivecpu, &lnactivecpu, NULL, 0);
sysctlnametomib( "hw.physicalcpu", mib, &lmib );
sysctl( mib, 2, &nphysicalcpu, &lnphysicalcpu, NULL, 0);
sysctlnametomib( "hw.physicalcpu_max", mib, &lmib );
sysctl( mib, 2, &nphysicalcpumax, &lnphysicalcpumax, NULL, 0);
sysctlnametomib( "hw.logicalcpu", mib, &lmib );
sysctl( mib, 2, &nlogicalcpu, &lnlogicalcpu, NULL, 0);
sysctlnametomib( "hw.logicalcpu_max", mib, &lmib );
sysctl( mib, 2, &nlogicalcpumax, &lnlogicalcpumax, NULL, 0);
if ( _debug_ >= 4 ) {
fprintf( stderr, "ncpu = %d\n", ncpu );
fprintf( stderr, "navailablecpu = %d\n", navailablecpu );
fprintf( stderr, "nactivecpu = %d\n", nactivecpu );
fprintf( stderr, "nphysicalcpu = %d\n", nphysicalcpu );
fprintf( stderr, "nphysicalcpumax = %d\n", nphysicalcpumax );
fprintf( stderr, "nlogicalcpu = %d\n", nlogicalcpu );
fprintf( stderr, "nlogicalcpumax = %d\n", nlogicalcpumax );
}
/* keep the largest reported count, never less than 1 */
if ( n < ncpu ) n = ncpu;
if ( n < nactivecpu ) n = nactivecpu;
if ( n < nphysicalcpu ) n = nphysicalcpu;
if ( n < nphysicalcpumax ) n = nphysicalcpumax;
if ( n < nlogicalcpu ) n = nlogicalcpu;
if ( n < nlogicalcpumax ) n = nlogicalcpumax;
if ( n < navailablecpu ) n = navailablecpu;
return( n );
}
return( 1 );
}
#else
/*
 * POSIX variant: number of processors currently online.
 * Fix: sysconf() may fail and return -1; the raw return value was passed
 * through unchecked, which would let a negative CPU count reach
 * _setMaxChunks().  Clamp to at least 1.
 */
static int _getNCPU()
{
  long ncpu = sysconf( _SC_NPROCESSORS_ONLN );
  return( (ncpu > 0) ? (int)ncpu : 1 );
}
#endif
/*
 * Resolve _max_chunks_ from the configured parallelism when it has not
 * been set explicitly (<=0): 1 for no parallelism, 100 for OpenMP with
 * more than one thread, number of CPUs for pthreads.  The switch falls
 * through from the default case like processChunks.  If the result is a
 * single chunk, parallelism is disabled altogether.
 */
static void _setMaxChunks( )
{
switch( _parallelism_ ) {
case _NO_PARALLELISM_ :
_max_chunks_ = 1;
break;
default :
case _DEFAULT_PARALLELISM_ :
/* fallthrough */
#ifdef _OPENMP
case _OMP_PARALLELISM_ :
if ( omp_get_max_threads() == 1 ) {
_max_chunks_ = 1;
}
else {
if ( _max_chunks_ <= 0 ) _max_chunks_ = 100;
}
break;
#endif
case _PTHREAD_PARALLELISM_ :
if ( _max_chunks_ <= 0 ) _max_chunks_ = _getNCPU();
break;
}
if ( _max_chunks_ == 1 ) _parallelism_ = _NO_PARALLELISM_;
}
/*
 * Split the index range [first,last] into chunks according to the current
 * parallelism settings.  The chunk count is capped so that every chunk
 * holds at least _min_elements_ elements.  'from' (may be NULL) identifies
 * the caller in diagnostics.  Returns 1 on success, -1 on error.
 *
 * Fix: in the debug trace, the if/else branches were identical and the
 * else branch passed 'from' -- known to be NULL there -- through a "%s"
 * conversion, which is undefined behavior.  The else branch now prints
 * without the caller name.
 */
int buildChunks( typeChunks *chunks, size_t first, size_t last, char *from )
{
  char *proc = "buildChunks";
  size_t size = last-first+1;
  int n;
  _setMaxChunks( );
  n = _max_chunks_;
  chunks->ompScheduling = _omp_scheduling_;
  /* only one chunk
   */
  if ( n == 1 ) {
    if ( allocBuildOneChunk( chunks, first, last ) != 1 ) {
      if ( _verbose_ )
        fprintf( stderr, "%s: error when allocating one chunk\n", proc );
      return( -1 );
    }
    return( 1 );
  }
  /* too few elements: lower the chunk count so that each chunk gets
   * at least _min_elements_ elements
   */
  if ( size < (size_t)n*_min_elements_ ) {
    n = size / _min_elements_;
    if ( size % _min_elements_ > 0 ) n ++;
    if ( n == 1 ) {
      if ( allocBuildOneChunk( chunks, first, last ) != 1 ) {
        if ( _verbose_ )
          fprintf( stderr, "%s: error when allocating one chunk (too few element case)\n", proc );
        return( -1 );
      }
      return( 1 );
    }
  }
  if ( allocBuildEqualChunks( chunks, first, last, n ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: error when building %d chunks (call from %s)\n", proc, n, from );
    return( -1 );
  }
  if ( _debug_ >= 3 ) {
    fprintf( stderr, "\n" );
    if ( from != (char*)NULL )
      fprintf( stderr, "%s: has computed %d chunks from '%s'\n", proc, chunks->n_allocated_chunks, from );
    else
      fprintf( stderr, "%s: has computed %d chunks\n", proc, chunks->n_allocated_chunks );
  }
  return( 1 );
}
/************************************************************
*
* chunks construction (generic functions)
*
************************************************************/
/*
 * Allocate a chunk array of size one covering the whole range [first,last].
 * Returns 1 on success, -1 on allocation error.
 *
 * Fix: the diagnostic prefix said "oneChunk"; every other function in this
 * file reports its own name, so the message named a nonexistent function.
 */
int allocBuildOneChunk( typeChunks *chunks, size_t first, size_t last )
{
  char *proc = "allocBuildOneChunk";
  if ( allocChunks( chunks, 1 ) <= 0 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: error when allocating one chunk\n", proc );
    return( -1 );
  }
  chunks->data[0].first = first;
  chunks->data[0].last = last;
  return( 1 );
}
/*
 * Fill n pre-allocated chunk descriptors with an (almost) equal split of
 * [first,last]: every chunk gets totalSize/n elements, and the remainder
 * is spread by giving one extra element to some chunks.  The condition
 * chunkSize*(n-i) < totalSize is true exactly while the remaining data
 * exceeds "base size" chunks.  Returns 1 on success, -1 on bad arguments.
 */
int buildEqualChunks( typeChunk *chunk, size_t first, size_t last, int n )
{
char *proc = "buildEqualChunks";
size_t totalSize = last+1-first;
size_t f = first;
size_t i, chunkSize;
if ( n <= 0 ) {
if ( _verbose_ )
fprintf( stderr, "%s: negative number of chunks\n", proc );
return( -1 );
}
chunkSize = totalSize / n;
/* chunkSize is unsigned: this catches the "more chunks than data" case */
if ( chunkSize <= 0 ) {
if ( _verbose_ )
fprintf( stderr, "%s: negative or null chunk size (too many chunks ?)\n", proc );
return( -1 );
}
for ( i=0; i<(size_t)n; i++, f+=chunkSize ) {
/* chunk of size chunkSize+1
*/
if ( chunkSize * (n-i) < totalSize ) {
totalSize -= chunkSize + 1;
chunk[ i ].first = f;
chunk[ i ].last = f+(chunkSize+1)-1;
f++;
}
/* chunk of size chunkSize
*/
else {
totalSize -= chunkSize;
chunk[ i ].first = f;
chunk[ i ].last = f+chunkSize-1;
}
}
return( 1 );
}
/*
 * Allocate a chunk array and split [first,last] into at most nchunks
 * pieces of (almost) equal size; the count is clamped to the number of
 * elements so that no chunk is empty.  Returns 1 on success, -1 on error.
 */
int allocBuildEqualChunks( typeChunks *chunks, size_t first, size_t last, int nchunks )
{
  char *proc = "allocBuildEqualChunks";
  size_t nelems = last - first + 1;
  int count = nchunks;

  if ( count <= 0 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: negative number of chunks\n", proc );
    return( -1 );
  }
  /* never build more chunks than there are elements */
  if ( nelems < (size_t)count )
    count = nelems;
  if ( allocChunks( chunks, count ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: error when allocating chunks\n", proc );
    return( -1 );
  }
  if ( buildEqualChunks( chunks->data, first, last, count ) != 1 ) {
    freeChunks( chunks );
    if ( _verbose_ )
      fprintf( stderr, "%s: error when calculating chunks\n", proc );
    return( -1 );
  }
  return( 1 );
}
/*
 * Build chunks of decreasing size: at each round, a "bucket" of
 * chunks_bucket equal chunks receives 'fraction' of the remaining data,
 * until one of the stopping rules spreads the rest equally.  Two passes
 * are made with the SAME decision logic: pass 1 only counts the chunks to
 * allocate, pass 2 fills them -- the two passes must stay in sync if this
 * logic is ever modified.  Returns 1 on success, -1 on error.
 */
int allocBuildInequalChunks( typeChunks *chunks, size_t first, size_t last,
double fraction, /* fraction of data to be put in one bucket */
int chunks_bucket, /* number of chunks for one bucket */
size_t minimal_chunk_size,
int maximal_chunk_number )
{
char *proc = "allocBuildInequalChunks";
size_t totalSize = last+1-first;
size_t f = first;
size_t i, chunkSize;
int maxchunks;
int n, nchunks;
if ( maximal_chunk_number <= 0 ) {
if ( _verbose_ )
fprintf( stderr, "%s: negative maximal number of chunks\n", proc );
return( -1 );
}
if ( chunks_bucket <= 0 ) {
if ( _verbose_ )
fprintf( stderr, "%s: negative number of chunks in a bucket\n", proc );
return( -1 );
}
if ( fraction < 0.0 || 1.0 <= fraction ) {
if ( _verbose_ )
fprintf( stderr, "%s: fraction should be in ]0,1[\n", proc );
return( -1 );
}
/* pass 1: calculating the number of chunks
*/
for ( nchunks=0, maxchunks=maximal_chunk_number, totalSize=last+1-first; totalSize > 0 && maxchunks > 0; ) {
/* put the remaining data in the remaining chunks
-> equal repartition in maxchunks chunks
*/
if ( maxchunks <= chunks_bucket ) {
nchunks += maxchunks;
totalSize = 0;
if ( _debug_ >= 3 ) fprintf( stderr, "(1) add %d chunks -> %d\n", maxchunks, nchunks );
continue;
}
/* the remaining data fits in one bucket
-> equal repartition in chunks_bucket chunks
*/
if ( totalSize / chunks_bucket < minimal_chunk_size ) {
nchunks += chunks_bucket;
totalSize = 0;
if ( _debug_ >= 3 ) fprintf( stderr, "(2) add %d chunks -> %d\n", chunks_bucket, nchunks );
continue;
}
/* the considered fraction of data in the bucket
yield too small chunks
-> equal repartition in min( totalSize / minimal_chunk_size, maxchunks)
*/
if ( (fraction * totalSize) / chunks_bucket < minimal_chunk_size ) {
n = totalSize / minimal_chunk_size;
if ( n > maxchunks ) n = maxchunks;
nchunks += n;
totalSize = 0;
if ( _debug_ >= 3 ) fprintf( stderr, "(3) add %d chunks -> %d\n", n, nchunks );
continue;
}
/* after filling the bucket, there will be too much
remaining data
-> equal repartition in maxchunks
*/
if ( (maxchunks <= 2*chunks_bucket) &&
(fraction * totalSize) / chunks_bucket < ((1-fraction) * totalSize) / (maxchunks-chunks_bucket) ) {
nchunks += maxchunks;
totalSize = 0;
if ( _debug_ >= 3 ) fprintf( stderr, "(4) add %d chunks -> %d\n", maxchunks, nchunks );
continue;
}
/* generic case
*/
chunkSize = (fraction * totalSize) / chunks_bucket;
nchunks += chunks_bucket;
maxchunks -= chunks_bucket;
totalSize -= chunks_bucket * chunkSize;
if ( _debug_ >= 3 ) fprintf( stderr, "(5) add %d chunks -> %d\n", chunks_bucket, nchunks );
}
/* allocating the chunks
*/
if ( allocChunks( chunks, nchunks ) <= 0 ) {
if ( _verbose_ )
fprintf( stderr, "%s: error when allocating chunks\n", proc );
return( -1 );
}
/* pass 2: filling chunks (mirror of the counting pass above)
*/
for ( nchunks=0, maxchunks=maximal_chunk_number, totalSize=last+1-first, f=first; totalSize > 0 && maxchunks > 0; ) {
/* put the remaining data in the remaining chunks
-> equal repartition in maxchunks chunks
*/
if ( maxchunks <= chunks_bucket ) {
if ( buildEqualChunks( &(chunks->data[nchunks]), f, last, maxchunks ) != 1 ) {
freeChunks( chunks );
if ( _verbose_ )
fprintf( stderr, "%s: error when calculating chunks\n", proc );
return( -1 );
}
totalSize = 0;
continue;
}
/* the remaining data fits in one bucket
-> equal repartition in chunks_bucket chunks
*/
if ( totalSize / chunks_bucket < minimal_chunk_size ) {
if ( buildEqualChunks( &(chunks->data[nchunks]), f, last, chunks_bucket ) != 1 ) {
freeChunks( chunks );
if ( _verbose_ )
fprintf( stderr, "%s: error when calculating chunks\n", proc );
return( -1 );
}
totalSize = 0;
continue;
}
/* the considered fraction of data in the bucket
yield too small chunks
-> equal repartition in min( totalSize / minimal_chunk_size, maxchunks)
*/
if ( (fraction * totalSize) / chunks_bucket < minimal_chunk_size ) {
n = totalSize / minimal_chunk_size;
if ( n > maxchunks ) n = maxchunks;
if ( buildEqualChunks( &(chunks->data[nchunks]), f, last, n ) != 1 ) {
freeChunks( chunks );
if ( _verbose_ )
fprintf( stderr, "%s: error when calculating chunks\n", proc );
return( -1 );
}
totalSize = 0;
continue;
}
/* after filling the bucket, there will be too much
remaining data
-> equal repartition in maxchunks
*/
if ( (maxchunks <= 2*chunks_bucket) &&
(fraction * totalSize) / chunks_bucket < ((1-fraction) * totalSize) / (maxchunks-chunks_bucket) ) {
if ( buildEqualChunks( &(chunks->data[nchunks]), f, last, maxchunks ) != 1 ) {
freeChunks( chunks );
if ( _verbose_ )
fprintf( stderr, "%s: error when calculating chunks\n", proc );
return( -1 );
}
totalSize = 0;
continue;
}
/* generic case
*/
chunkSize = (fraction * totalSize) / chunks_bucket;
for (i=0; i<(size_t)chunks_bucket; i++, f+=chunkSize, totalSize-=chunkSize, nchunks++, maxchunks-- ) {
chunks->data[ nchunks ].first = f;
chunks->data[ nchunks ].last = f+chunkSize-1;
}
}
return( 1 );
}
/************************************************************
*
* management
*
************************************************************/
void initChunk( typeChunk *chunk )
{
  /* Put a single chunk into its empty state.
     An empty chunk is encoded as first > last (zero-length range). */
  chunk->ret = 0;
  chunk->parameters = (void*)NULL;
  chunk->last = 0;
  chunk->first = 1;
}
void initChunks( typeChunks *chunks )
{
  /* Reset the chunk container: no storage allocated,
     default OpenMP scheduling policy. */
  chunks->ompScheduling = _DEFAULT_OMP_SCHEDULING_;
  chunks->n_allocated_chunks = 0;
  chunks->data = (typeChunk*)NULL;
}
void freeChunks( typeChunks *chunks )
{
  /* Release the chunk array (if any) and return the
     container to its freshly-initialized state. */
  if ( chunks->data != (typeChunk*)NULL )
    vtfree( chunks->data );
  initChunks( chunks );
}
int allocChunks( typeChunks *chunks, int n )
{
  /* Allocate storage for 'n' chunks and initialize every slot.
     Returns 1 on success, -1 on a bad count or allocation failure. */
  char *proc = "allocChunks";
  int k;

  /* reject non-positive chunk counts up front */
  if ( n <= 0 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: negative or null number of chunks\n", proc );
    return( -1 );
  }

  chunks->data = (typeChunk*)vtmalloc( n *sizeof(typeChunk), "chunks->data", proc );
  if ( chunks->data == (typeChunk*)NULL ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: error when allocating %d chunks\n", proc, n );
    return( -1 );
  }

  /* put every slot into a well-defined empty state */
  for ( k = 0; k < n; k++ )
    initChunk( &(chunks->data[k]) );
  chunks->n_allocated_chunks = n;
  return( 1 );
}
void printChunks( FILE *theFile, typeChunks *chunks, char *s )
{
  /* Dump the chunk table to 'theFile' (stdout when NULL); 's', when
     non-NULL, is printed as a caller-supplied label. */
  char *proc = "printChunks";
  FILE *out = ( theFile == NULL ) ? stdout : theFile;
  int k;

  if ( s != (char *)NULL )
    fprintf( out, "%s: information on '%s'\n", proc, s );

  if ( chunks->n_allocated_chunks <= 0 || chunks->data == (typeChunk*)NULL ) {
    fprintf( out, "empty chunks\n" );
    return;
  }

  /* one line per chunk: index, [first last] = length */
  for ( k = 0; k < chunks->n_allocated_chunks; k++ ) {
    typeChunk *c = &(chunks->data[k]);
    fprintf( out, "#%3d [%12lu %12lu] = %12lu\n",
             k, c->first, c->last, c->last + 1 - c->first );
  }
  fprintf( out, "\n" );
}
/************************************************************
*
* test
*
************************************************************/
|
split.c | #include "incs.h"
#include "check.h"
#include "check_tree.h"
#include "preproc_j.h"
#ifdef SCALAR
#include "reorder.h"
#endif
#ifdef VECTOR
#include "reorder_isp.h"
#endif
#include "search.h"
#include "split.h"
extern node_t *g_tree; // this is where the decision tree is created
extern int g_n_tree;
extern int g_sz_tree;
extern config_t g_C;
/*
 * Recursively grow the global decision tree (g_tree) over the instance
 * range [lb, ub).  Uses search() to pick the best (feature, value) split,
 * reorders every feature column of Y around that split in parallel, then
 * recurses on the left and right halves.  Returns 0 on success (including
 * the "no split performed" case); cBYE() routes errors to BYE with a
 * negative status.
 */
int
split(
  uint32_t **to, /* [m][n] */
  uint8_t *g, // for debugging
  uint32_t lb,
  uint32_t ub,
  uint32_t nT, // number of tails in this data set
  uint32_t nH, // number of heads in this data set
  uint32_t n,
  uint32_t m,
  uint64_t **Y, /* [m][n] */
  uint64_t **tmpY, /* [n] */
  uint32_t depth
)
{
  int status = 0;
  // No point splitting if it would create more nodes than permissible
  if ( g_n_tree >= g_sz_tree ) {
    // fprintf(stderr, "No space in tree. Returning... \n");
    return status;
  }
  // if a split would cause one of the children to be smaller
  // size than permissible, no point splitting
  if ( (ub - lb) < 2 * g_C.min_leaf_size ) { return status; }
  // If a split would cause one of the children to be of greater
  // depth than permissible, no point splitting
  if ( depth >= g_C.max_depth ) {
    // fprintf(stderr, "Tree too deep. Cannot split further \n");
    return status;
  }
  // If all instances have same goal, the node is pure: nothing to split
  if ( ( nH == 0 ) || ( nT == 0 ) ) {
    return status;
  }
  four_nums_t num4; memset(&num4, 0, sizeof(four_nums_t));
  uint32_t split_j = m+1; // set to some bad value
  uint32_t split_yidx, split_yval;
  bool is_splittable = false;
  // above is set by search(). tells us whether split_idx/yval are any good
  if ( g_C.is_verbose ) {
    printf("Splitting %u to %u \n", lb, ub);
  }
  if ( g_C.is_debug ) {
    // sanity-check the invariants of this range before searching
    status = check(to, g, lb, ub, nT, nH, n, m, Y); cBYE(status);
  }
  //-----------------------------------------
  // search() selects the split feature (split_j), split value (split_yval)
  // and split row index (split_yidx), and fills num4 with the left/right
  // head/tail counts used to label the children below.
  status = search(Y, lb, ub, nT, nH, m, n,
      &split_j, &split_yval, &split_yidx, &num4, &is_splittable);
  cBYE(status);
  if ( !is_splittable ) { // none of the features offer a valid split
    return status;
  }
  //---------------------------------------------------
  // START: Re-order the data based on the best split found in search()
  // Note that some iterations are no-ops. This happens if
  // (a) iteration corresponds to feature selected for split
  // (b) all values of feature are the same
  // NOTE(review): under DEBUG several threads may write status = -1
  // concurrently; all writers store the same value, but this is still a
  // data race by the letter of the standard — confirm intent.
  #pragma omp parallel for schedule(dynamic, 1) num_threads(g_C.num_cores)
  for ( uint32_t j = 0; j < m; j++ ) {
    int lstatus = 0;
    uint32_t lidx = lb, ridx = split_yidx;
    uint64_t *Yj = Y[j];
    uint64_t *tmpYj = tmpY[j];
#ifndef DEBUG
    // the order of the attribute chosen for split is unchanged
    // Hence, we can skip reordering it
    if ( j == split_j ) { continue; }
    // Quick return if all values are the same
    // This *may* mess up some of my invariants but won't affect
    // correctness of tree
    if ( Y[j][lb] == Y[j][ub-1] ) { continue; }
#endif
#ifdef SCALAR
    lstatus = reorder(Y[j], tmpY[j], to[j], to[split_j], lb, ub,
        split_yidx, &lidx, &ridx);
#endif
#ifdef VECTOR
    reorder_isp(Y[j], tmpY[j], to[j], to[split_j], lb, ub,
        split_yidx, &lidx, &ridx, &lstatus);
#endif
#ifdef DEBUG
    // after a correct reorder, the left cursor ends at the split row and
    // the right cursor at the end of the range
    if ( lstatus < 0 ) { WHEREAMI; status = -1; continue; }
    if ( lidx != split_yidx ) { WHEREAMI; status = -1; continue; }
    if ( ridx != ub ) { WHEREAMI; status = -1; continue; }
#endif
    if ( j != split_j ) {
      // copy the reordered column back from the scratch buffer
      // SLOW: for ( uint32_t i = lb; i < ub; i++ ) { Yj[i] = tmpYj[i]; }
      memcpy(Yj+lb, tmpYj+lb, (ub-lb) * sizeof(uint64_t));
    }
    else { // no need to re-order feature chosen for split
#ifdef DEBUG
      for ( uint32_t i = lb; i < ub; i++ ) {
        if ( Yj[i] != tmpYj[i] ) { WHEREAMI; status = -1; continue; }
      }
#endif
    }
  }
  // STOP : Re-order the data based on the best split found in search()
  // The node we are splitting is the most recently created one.
  int parent_id = g_n_tree - 1;
  if ( ( split_yidx - lb ) >= g_C.min_partition_size ) {
    // set parent to lchild pointer and lchild to parent pointer
    g_tree[g_n_tree-1].lchild_id = g_n_tree;
    g_tree[g_n_tree].parent_id = parent_id;
    // set nH and nT for this newly created left child
    g_tree[g_n_tree].nT = num4.n_T_L;
    g_tree[g_n_tree].nH = num4.n_H_L;
    g_tree[g_n_tree].depth = depth + 1;
    g_n_tree++;
    // split the left child
    status = split(to, g, lb, split_yidx, num4.n_T_L, num4.n_H_L,
        n, m, Y, tmpY, depth+1);
    cBYE(status);
#ifdef DEBUG
    status = check_tree(g_tree, g_n_tree, m); cBYE(status);
#endif
  }
  // This check needs to be repeated because of left child creation
  // No point splitting if it would create more nodes than permissible
  if ( g_n_tree >= g_sz_tree ) {
    // fprintf(stderr, "No space in tree. Returning... \n");
    return status;
  }
  if ( ( ub - split_yidx ) >= g_C.min_partition_size ) {
    // set parent to rchild pointer and rchild to parent pointer
    // (parent_id was captured before the left subtree grew the tree)
    g_tree[parent_id].rchild_id = g_n_tree;
    g_tree[g_n_tree].parent_id = parent_id;
    // set nH and nT for this newly created right child
    g_tree[g_n_tree].nT = num4.n_T_R;
    g_tree[g_n_tree].nH = num4.n_H_R;
    g_tree[g_n_tree].depth = depth + 1;
    g_n_tree++;
    // split the right child
    status = split(to, g, split_yidx, ub, num4.n_T_R, num4.n_H_R,
        n, m, Y, tmpY, depth+1);
    cBYE(status);
#ifdef DEBUG
    status = check_tree(g_tree, g_n_tree, m); cBYE(status);
#endif
  }
BYE:
  return status;
}
|
schedule.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main (int argc, char** argv) {
  /* Demonstrate the five OpenMP loop-scheduling policies, each over the
     same 20-iteration loop with 4 threads, printing which thread runs
     which iteration. */
  #pragma omp parallel for schedule(static, 2) num_threads(4)
  for (int i = 0; i < 20; i++) {
    printf("Test Static - thread:%d, iter: %d.\n", omp_get_thread_num(), i);
  }

  #pragma omp parallel for schedule(dynamic, 2) num_threads(4)
  for (int i = 0; i < 20; i++) {
    printf("Test Dynamic - thread:%d, iter: %d.\n", omp_get_thread_num(), i);
  }

  #pragma omp parallel for schedule(guided, 2) num_threads(4)
  for (int i = 0; i < 20; i++) {
    printf("Test Guided - thread:%d, iter: %d.\n", omp_get_thread_num(), i);
  }

  #pragma omp parallel for schedule(auto) num_threads(4)
  for (int i = 0; i < 20; i++) {
    printf("Test Auto - thread:%d, iter: %d.\n", omp_get_thread_num(), i);
  }

  /* schedule(runtime) defers the choice to OMP_SCHEDULE / omp_set_schedule */
  #pragma omp parallel for schedule(runtime) num_threads(4)
  for (int i = 0; i < 20; i++) {
    printf("Test Runtime - thread:%d, iter: %d.\n", omp_get_thread_num(), i);
  }
  return 0;
}
|
GB_binop__div_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_int64
// A.*B function (eWiseMult): GB_AemultB__div_int64
// A*D function (colscale): GB_AxD__div_int64
// D*A function (rowscale): GB_DxB__div_int64
// C+=B function (dense accum): GB_Cdense_accumB__div_int64
// C+=b function (dense accum): GB_Cdense_accumb__div_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int64
// C=scalar+B GB_bind1st__div_int64
// C=scalar+B' GB_bind1st_tran__div_int64
// C=A+scalar GB_bind2nd__div_int64
// C=A'+scalar GB_bind2nd_tran__div_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IDIV_SIGNED (x, y, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT64 || GxB_NO_DIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The body is a shared template
// specialized via the GB_* macros defined earlier in this file (the binary
// op here is signed 64-bit integer division).  Unlike the other kernels in
// this file, this one has no GB_DISABLE guard and returns void.
void GB_Cdense_ewise3_accum__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; body supplied by template.
GrB_Info GB_Cdense_ewise3_noaccum__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // this operator/type combination was compiled out; the caller falls
    // back to the generic kernel when GrB_NO_VALUE is returned
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// The kfirst/klast/pstart slice arrays partition B's entries across
// 'ntasks' tasks for the template.
GrB_Info GB_Cdense_accumB__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__div_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned.
    // Harmless artifact of the code generator; do not edit by hand.
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's numerical values, used by the template
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed view of C's numerical values, used by the template
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns).
// C_to_M/C_to_A/C_to_B map C's vectors to those of M, A, and B, and
// TaskList partitions the work across ntasks tasks.
GrB_Info GB_AaddB__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns).
GrB_Info GB_AemultB__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = op (x, Bx [k]): apply the binary operator with the scalar
// bound as the first argument.
GrB_Info GB_bind1st__div_int64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped inputs and output
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int64_t bk = Bx [k] ;
        Cx [k] = GB_IDIV_SIGNED (x, bk, 64) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = op (Ax [k], y): apply the binary operator with the scalar
// bound as the second argument.
GrB_Info GB_bind2nd__div_int64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped inputs and output
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int64_t ak = Ax [k] ;
        Cx [k] = GB_IDIV_SIGNED (ak, y, 64) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = x / aij,
// with the scalar x bound as the *first* operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (x, aij, 64) ; \
}

// C = op (x, A'): transpose A and apply the bound binary operator.
GrB_Info GB_bind1st_tran__div_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for any code that follows this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
        int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = aij / y,
// with the scalar y bound as the *second* operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (aij, y, 64) ; \
}

// C = op (A', y): transpose A and apply the bound binary operator.
GrB_Info GB_bind2nd_tran__div_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate and zero a new CacheInfo; allocation failure is fatal.
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Raise the nexus count to at least the OpenMP maximum and the thread
    resource limit; never leave it at zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Synchronize flag: the MAGICK_SYNCHRONIZE environment variable is read
    first, then the cache:synchronize policy (the policy, when set, wins).
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads nexus slots in one contiguous arena: the
    first number_threads entries are the per-thread nexuses, the second
    half serves as their pixel_nexus companions (see the loop below).
    Allocation failure is fatal.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(nexus_info[0],0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    /* point each slot into the single backing array */
    nexus_info[i]=(&nexus_info[0][i]);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->pixel_nexus=(&nexus_info[0][number_threads+i]);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Only in-memory and memory-mapped caches expose their pixels directly;
    for any other cache type return NULL with *length zeroed.
  */
  *length=0;
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=(size_t) cache_info->length;
  return(cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Lazily create the module-wide cache semaphore on first genesis.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* NOTE(review): when genesis never ran, the semaphore is activated here
     so RelinquishSemaphoreInfo() has a valid object to destroy —
     presumably ActivateSemaphoreInfo() allocates on NULL; confirm. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,   /* authentic (on-disk/in-cache) pixels */
    *magick_restrict q;   /* the nexus pixels being written back */

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to do when the image carries no write mask channel */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->pixel_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    /* GetAuthenticPixelCacheNexus failure ends the loop early; the
       final n < number_pixels check below reports MagickFalse */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /* blend each updatable channel of q over p, weighted by the
           write-mask alpha */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict duplicate;

  /*
    Build a fresh pixel cache mirroring the source's thread count and
    virtual pixel method (pixel data is not copied here).
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  duplicate=(CacheInfo *) AcquirePixelCache(source->number_threads);
  duplicate->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) duplicate);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination_info,
    *magick_restrict origin_info;

  /*
    Copy the pixel cache method table of `cache` into `clone`.
  */
  assert(clone != (Cache) NULL);
  destination_info=(CacheInfo *) clone;
  assert(destination_info->signature == MagickCoreSignature);
  if (destination_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination_info->filename);
  assert(cache != (Cache) NULL);
  origin_info=(CacheInfo *) cache;
  assert(origin_info->signature == MagickCoreSignature);
  destination_info->methods=origin_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *clone_info,
% CacheInfo *cache_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o clone_info: the destination pixel cache.
%
% o cache_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology: stream the backing
    file of cache_info into the backing file of clone_info.  Returns
    MagickFalse unless exactly cache_info->length bytes were transferred.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Use a transfer buffer no larger than the source file itself. */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes,
      offset;

    /*
      write(2) may transfer fewer bytes than requested (e.g. when a signal
      is delivered); loop until the whole chunk is flushed so a short write
      does not falsely abort the clone.
    */
    offset=0;
    while (offset < count)
    {
      number_bytes=write(clone_info->file,buffer+offset,(size_t) (count-
        offset));
      if (number_bytes <= 0)
        break;  /* unrecoverable write error (or zero-byte write) */
      offset+=number_bytes;
      extent+=(MagickSizeType) number_bytes;
    }
    if (offset < count)
      break;  /* the final length check below reports the failure */
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
/* Number of nexus slots to allocate: one per permitted worker thread. */
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  OpenMP clause: run serially when 'multithreaded' is 0; cap the team at two
  threads when either cache is not memory/map backed (disk I/O bound);
  otherwise scale with 'chunk' (roughly one thread per 256 rows).
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Copy the pixels (and metacontent, if any) of cache_info into clone_info.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* a ping cache holds no pixels to copy */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: a raw block copy suffices.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through per-thread
    nexuses, remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* Identical channel maps permit a straight memcpy per row. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  /* Quanta to copy per row: bounded by the narrower of the two caches. */
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining work */
    if (y >= (ssize_t) clone_info->rows)
      continue;  /* destination has fewer rows than the source */
    /* Read one source row into this thread's cache nexus. */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* Stage the corresponding destination row in the clone nexus. */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    /* Zero first so destination channels absent from the source stay 0. */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: copy channel by channel, resolving
          each destination channel's offset through the source channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;  /* destination row is narrower than the source row */
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent, bounded by the smaller per-pixel extent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache, if one is attached.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Free the pixel cache, delegating to a registered destroy handler when
    one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  /*
    Close the cache's file descriptor and release its file resource slot.
    Returns MagickFalse when no file was open or close(2) failed.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage owned by the cache, dispatching on the cache
    type, then reset the cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* OpenCL owns the buffer; let it release the host pixels too. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* Remove the backing file unless it must outlive this cache. */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /* No break here: execution continues into DiskCache, which closes the
         file descriptor and releases the disk resource.
         NOTE(review): fallthrough looks intentional (a map cache is also
         disk-backed) -- confirm against upstream. */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* Mark the cache as holding no pixel storage. */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the pixel cache and free it when the count reaches
    zero.  Always returns (Cache) NULL.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Decrement the reference count under the cache semaphore. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* Other references remain; leave the cache alive. */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Last reference: release pixels, helper structures, then the cache. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Poison the signature to catch use-after-free. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free (or unmap) the nexus staging buffer and reset its bookkeeping.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  /*
    Tear down each nexus (two slots per thread), then free the backing
    allocation; always returns NULL.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    NexusInfo
      *nexus = nexus_info[i];

    if (nexus->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus);
    nexus->signature=(~MagickCoreSignature);  /* poison against reuse */
  }
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent from a registered handler when one is installed;
    otherwise hand back this thread's nexus metacontent.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return an OpenCL buffer wrapping the image's in-memory pixels, or
    (cl_mem) NULL when the cache cannot be shared with the device.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /* Ensure a private, initialized cache before sharing it. */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only unmapped, heap-resident memory caches are eligible. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* Re-wrap when the existing OpenCL info targets a different context. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* Retain the buffer for the caller while still holding the semaphore. */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Queue the requested region, then populate it from the backing store
    unless the nexus already aliases the authentic pixels directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return pixels from a registered handler when present, otherwise the
    pixels staged in this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region: dispatch to a registered handler when
    installed, otherwise fetch through this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default authentic-pixel handler: fetch the region through this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Report the extent of the pixels staged in this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Answer whether the image still matches its pixel cache morphology.
  */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (image->metacontent_extent != cache_info->metacontent_extent))
    return(MagickFalse);
  if (memcmp(image->channel_map,cache_info->channel_map,
      image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    destroy,
    status;
  /*
    Throttle and time-limit state shared by every caller.  NOTE(review):
    these statics are read-modify-written without a lock (e.g. cycles++);
    presumably the races are considered benign -- confirm.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;
  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* periodically yield the CPU when a throttle resource limit is set */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
    ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
      /*
        Time resource limit exceeded: close any disk cache file, then abort
        via ThrowFatalException (does not return).
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    If the cache is shared (reference_count > 1) or read-only, replace it
    with a private writable clone (copy-on-write).  The condition is
    re-checked under the cache semaphore in case another thread released
    its reference while we waited for the lock.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;
          Image
            clone_image;
          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy the pixel data too when the caller asked for a clone */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* adopt the clone; destroy the old cache once unlocked */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  /* NULL signals failure; callers must check before dereferencing */
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report how this image's pixels are currently backed (memory, map, disk,
    ping, ...).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    n;

  if (source == (const Quantum *) NULL)
    {
      /*
        No pixel available: fall back to the image background color and
        report failure.
      */
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /*
    Scatter each interleaved source component into its channel slot.
  */
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  register Quantum
    *magick_restrict q;

  /*
    Fetch one authentic pixel, delegating to the cache's installed handler
    when there is one; otherwise read through the authentic pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  get_one_authentic_pixel_from_handler=
    cache_info->methods.get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(get_one_authentic_pixel_from_handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict q;

  /*
    Read one authentic pixel through this thread's private cache nexus;
    the pixel buffer is zeroed first so a failed read yields background
    values via CopyPixel().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Fetch one virtual pixel: prefer the cache's installed handler,
    otherwise go straight to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  /*
    Read a single virtual pixel through this thread's private cache nexus;
    the pixel buffer is zeroed first so a failed read yields background
    values via CopyPixel().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict p;

  /*
    Resolve one virtual pixel and widen it into a PixelInfo.  On failure
    *pixel holds only the GetPixelInfo() defaults and MagickFalse is
    returned.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the colorspace the pixel cache stores its pixels in.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the filename backing this image's pixel cache on disk.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Reset the method table and install the default pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* authentic (writable) pixel handlers */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /* virtual (read-only) pixel handlers */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* teardown */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels corresponding
% to the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Extent of the last nexus request; an empty region reports the full
    cache extent instead.
  */
  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the raw pixel buffer when it lives in addressable memory
    (MemoryCache or MapCache); other cache types have no such buffer and
    yield NULL.  *length always receives the cache length.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report whether the cache stores DirectClass or PseudoClass pixels.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  /*
    Square tile sized so a row of tiles spans 2048 bytes of pixel data,
    or 8192 bytes when the cache is disk-backed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL;
  *width=extent/(cache_info->number_channels*sizeof(Quantum));
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the policy used to synthesize pixels outside the cache bounds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the meta-content associated with this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Meta-content is unavailable until the cache has a storage class.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  /*
    Prefer the installed handler; fall back to this thread's cache nexus
    when the handler yields no meta-content.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 dither offset table (values 0-63) used by DitherX()/DitherY() to
  perturb out-of-bounds coordinates for the dither virtual-pixel method.
  NOTE(review): DitherX()/DitherY() index only the first 8 entries
  (coordinate & 0x07) -- confirm the remaining rows are intentionally
  unused.
*/
static ssize_t
  DitherMatrix[64] =
  {
    0, 48, 12, 60, 3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
    8, 56, 4, 52, 11, 59, 7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
    2, 50, 14, 62, 1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58, 6, 54, 9, 57, 5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  /*
    Perturb x by a dither offset, then clamp to the valid column range
    [0,columns-1].
  */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  if (offset < 0L)
    return(0L);
  return(offset);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  /*
    Perturb y by a dither offset, then clamp to the valid row range
    [0,rows-1].  NOTE(review): like DitherX(), only the first 8 entries of
    DitherMatrix are reachable -- confirm intended.
  */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  if (offset < 0L)
    return(0L);
  return(offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into the valid column range [0,columns-1] (edge virtual-pixel
    policy: out-of-bounds coordinates stick to the nearest edge).
  */
  return(x < 0L ? 0L : x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into the valid row range [0,rows-1] (edge virtual-pixel
    policy: out-of-bounds coordinates stick to the nearest edge).
  */
  return(y < 0L ? 0L : y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Uniformly random column index in [0,columns).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Uniformly random row index in [0,rows).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division of offset by extent: quotient is the tile the offset
    falls in and remainder is the position within that tile, normalized so
    that 0 <= remainder < extent (C's truncated '%' yields a negative
    remainder for negative offsets).  Caller must ensure extent > 0.
  */
  modulo.quotient=offset/(ssize_t) extent;
  modulo.remainder=offset % (ssize_t) extent;
  if (modulo.remainder < 0)
    {
      /*
        Truncated division rounded toward zero: step down one tile and fold
        the remainder back into [0,extent).  Unlike the previous
        unconditional decrement for any negative offset, this leaves exact
        negative multiples (e.g. offset == -extent) with remainder 0 instead
        of remainder == extent, which was one past the valid range.
      */
      modulo.quotient--;
      modulo.remainder+=(ssize_t) extent;
    }
  return(modulo);
}
/*
  GetVirtualPixelCacheNexus() returns a pixel region from the cache.  When
  the requested region lies entirely inside the cache extents the pixels are
  read directly; otherwise each out-of-bounds pixel is synthesized according
  to the virtual pixel method (tile, mirror, edge, constant color, ...) by
  recursing on 1x1 (or run-length) sub-requests through nexus_info's child
  pixel_nexus.  Returns the nexus pixel buffer, or NULL on failure.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    length,
    number_pixels;
  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];
  register const Quantum
    *magick_restrict p;
  register const void
    *magick_restrict r;
  register Quantum
    *magick_restrict q;
  register ssize_t
    i,
    u;
  register unsigned char
    *magick_restrict s;
  ssize_t
    v;
  void
    *magick_restrict virtual_metacontent;
  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Map the request onto a nexus buffer; q walks the destination pixels and
    s walks the destination metacontent as the region is filled below.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the whole request is inside the cache extents, so read it
    straight from the pixel cache (or return the in-place buffer).
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;
        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-color methods, pre-build the single virtual pixel (and a
    zeroed metacontent buffer) that will stand in for every out-of-bounds
    coordinate.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* Background and the tile methods fall back to background color. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Fill the region row by row.  Inside each row, in-bounds spans are
    transferred as runs of `length' pixels; out-of-bounds coordinates are
    resolved one pixel at a time per the virtual pixel method.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;
    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;
      x_offset=x+u;
      /* Longest in-bounds run starting at x_offset (may be 0 or negative
         when x_offset is outside the cache). */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;
          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the RNG on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected; even tiles repeat as-is. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant color prepared above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,
                nexus_info->pixel_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->pixel_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,nexus_info->pixel_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,nexus_info->pixel_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): only `length' bytes are copied here although the
            destination cursor advances by length*metacontent_extent; it
            looks like the copy size should be
            length*cache_info->metacontent_extent -- confirm against the
            single-pixel branch above, which copies metacontent_extent bytes
            per pixel.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    /* Inner loop broke early (recursive fetch failed): abort the region. */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters.  A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch an immutable pixel region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%       const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  /*
    Return an immutable pixel region, honoring a registered virtual-pixel
    handler when one is installed; otherwise use this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call to
% QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels from this thread's most recent cache-nexus request.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixel buffer associated with the given cache nexus; NULL when
    the cache has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((const Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Composite pixel p over q weighted by the mask alpha/beta values; a fully
    opaque mask leaves p untouched.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  return(ClampToQuantum(PerceptibleReciprocal(gamma)*
    MagickOver_((double) p,alpha,(double) q,beta)));
}
/*
  MaskPixelCacheNexus() applies the image's composite mask to the pixels of
  the given nexus: each updatable channel of the nexus pixels (q) is blended
  against the freshly-read authentic pixels (p) weighted by the mask value.
  Returns MagickTrue on success (or when the image has no composite mask).
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    number_pixels;
  register Quantum
    *magick_restrict p,
    *magick_restrict q;
  register ssize_t
    n;
  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* Re-read the authentic pixels for the same region into a child nexus. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->pixel_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;
    register ssize_t
      i;
    /* A NULL p means the authentic read failed; abort with n short. */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      if ((traits & UpdatePixelTrait) == 0)
        continue;
      /* NOTE(review): GetPixelAlpha(image,q) is re-read while q's channels
         are being overwritten in this same loop; if the alpha channel is
         updatable, later channels see the blended alpha -- confirm this
         ordering is intended. */
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],
        (MagickRealType) GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* Report failure if the loop terminated early. */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    descriptor;

  /*
    Open (or create) the disk-backed pixel cache file in the requested mode.
    If the cache is already open in that mode, reuse the existing descriptor.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    descriptor=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      descriptor=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      if (mode == WriteMode)
        {
          /* Try exclusive create first, then fall back to an existing file. */
          descriptor=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
            O_BINARY | O_EXCL,S_MODE);
          if (descriptor == -1)
            descriptor=open_utf8(cache_info->cache_filename,O_WRONLY |
              O_BINARY,S_MODE);
        }
      else
        {
          /* IOMode and any other mode open the cache read/write. */
          descriptor=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT |
            O_BINARY | O_EXCL,S_MODE);
          if (descriptor == -1)
            descriptor=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,
              S_MODE);
        }
  if (descriptor == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=descriptor;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  WritePixelCacheRegion() writes `length' bytes from buffer to the cache file
  at the given offset, retrying on short writes and EINTR.  Returns the
  number of bytes written (which may be short of `length' on error), or -1
  if the initial seek fails on platforms without pwrite().
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite() the file position must be set explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Write at most SSIZE_MAX bytes per call; advance by the short count. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry only when interrupted by a signal; any other error (or a
           zero-byte write) aborts the loop with i bytes written. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  SetPixelCacheExtent() extends the on-disk pixel cache file to at least
  `length' bytes by writing a single byte at the final offset (creating a
  sparse file where supported), optionally backing it with real blocks via
  posix_fallocate().  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    count,
    extent,
    offset;
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];
      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not fit in a signed file offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /* Writing one byte at length-1 extends the file to `length'. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally pre-allocate the blocks so later writes cannot ENOSPC. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent reads/writes start at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() allocates the pixel cache, trying each backing store in
  order of preference: anonymous/heap memory, a distributed cache server,
  a memory-mapped disk file, and finally a plain disk file.  Cache geometry
  and channel layout are taken from the image; a previously open cache
  (source_info) is cloned into the new one when reopening for write.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;
  char
    format[MagickPathExtent],
    message[MagickPathExtent];
  const char
    *hosts,
    *type;
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  size_t
    columns,
    packet_size;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;
      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so its pixels can be cloned into (and then
    released from) the reopened cache.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Overflow check: recomputing columns from length must round-trip; a
    mismatch means number_pixels*packet_size overflowed.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode records geometry only; no pixel storage is allocated. */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  /*
    First preference: an in-memory (heap or anonymous-mapped) pixel cache.
  */
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* Allocation failed; restore the previous pixel buffer and
                 fall through to the disk-based strategies below. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
    }
  /*
    Second preference: a distributed pixel cache on a remote server, when
    disk resources are exhausted and cache hosts are registered.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;
      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* Discard the stale cache file so a fresh one is created below. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  /*
    Third preference: memory-map the disk file when it fits in the address
    space and the map resource limit allows; otherwise fall back to plain
    file I/O (DiskCache).
  */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mmap failed; keep the file descriptor for DiskCache I/O. */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                return(status == 0 ? MagickFalse : MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;
  MagickBooleanType
    status;
  ssize_t
    page_size;
  /*
    Attach to (attach != MagickFalse) or initialize a persistent, disk-backed
    pixel cache at `filename`; *offset is advanced past the cache data,
    rounded up to the next page boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Advance the caller's offset to the next page-aligned slot.
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Build a disk-backed clone descriptor that mirrors the source cache
    geometry, then copy the pixels into it.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    number_pixels;
  Quantum
    *magick_restrict pixels;
  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /*
    The region's origin must lie inside the cache.
  */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  /*
    The region's far corner must also lie inside the cache.
    NOTE(review): assumes columns >= 1 and rows >= 1; a zero extent would
    wrap (rows-1) -- confirm callers never request an empty region.
  */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    thread_id = GetOpenMPThreadId();
  /*
    Queue a mutable pixel region via this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    thread_id = GetOpenMPThreadId();
  /*
    Queue a mutable pixel region, honoring a registered handler override if
    one is installed; otherwise use this thread's private cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler ==
      (QueueAuthenticPixelsHandler) NULL)
    {
      assert(thread_id < (int) cache_info->number_threads);
      return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
        cache_info->nexus_info[thread_id],exception));
    }
  return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read `length` bytes at `offset` from the cache's file descriptor into
  `buffer`; returns the number of bytes actually read (short on EOF or a
  non-EINTR error).
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /*
      Each pass reads at most SSIZE_MAX bytes; pread() avoids a shared file
      position, the lseek()+read() fallback does not.
    */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /*
          Retry only when interrupted by a signal; otherwise report the short
          count to the caller.
        */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register ssize_t
    y;
  register unsigned char
    *magick_restrict q;
  size_t
    rows;
  /*
    Copy the metacontent for nexus_info's region out of the pixel cache into
    the nexus staging buffer.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        The nexus points directly into the cache: nothing to transfer.
      */
      return(MagickTrue);
    }
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;
      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width rows are contiguous: copy them in one pass.
          */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /*
            Full-width rows are contiguous on disk: read them in one pass.
          */
          length=extent;
          rows=1UL;
        }
      /*
        On disk, metacontent is stored after all of the pixel data; extent is
        reused here as the total pixel count to skip over.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        Not every row transferred: report a read failure.
      */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register Quantum
    *magick_restrict q;
  register ssize_t
    y;
  size_t
    number_channels,
    rows;
  /*
    Copy the pixels for nexus_info's region out of the pixel cache into the
    nexus staging buffer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        The nexus points directly into the cache: nothing to transfer.
      */
      return(MagickTrue);
    }
  /*
    Compute the starting pixel offset and per-row byte length, rejecting any
    arithmetic overflow along the way (each multiply is verified by dividing
    back).
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;
      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width rows are contiguous: copy them in one pass.
          */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /*
            Full-width rows are contiguous on disk: read them in one pass.
          */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        Not every row transferred: report a read failure.
      */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;
  /*
    Increment the cache's reference count under its semaphore and return the
    (shared) cache.  The null check compares against (Cache) NULL, matching
    every other Cache assert in this file (the previous (Cache *) cast was
    inconsistent).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;
  /*
    Refresh the cache's channel count from the image's pixel channels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the file-scope anonymous-memory policy flag to its initial value.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the file-scope pixel cache epoch counter to its initial value.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;
  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;
  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;
  /*
    Set cache pixel methods.  Each handler in cache_methods replaces the
    cache's current handler only when the incoming handler is non-NULL, so
    callers may supply a partially-populated CacheMethods structure.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    FIX(review): the one-virtual-pixel handler was previously guarded on the
    cache's CURRENT handler (cache_info->methods...) rather than the incoming
    one, unlike every other handler above; a NULL entry in cache_methods could
    then clobber a valid handler.  Guard on the incoming handler instead, for
    consistency with the rest of this function.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Allocate (heap) or anonymously map a staging buffer of `length` bytes for
  the cache nexus; sets nexus_info->cache, ->length, and ->mapped.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  if (length != (MagickSizeType) ((size_t) length))
    {
      /*
        The request does not fit in size_t on this platform.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /*
        Aligned heap allocation; cleared so stale data never leaks out.
      */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /*
        Policy requests anonymous memory: map a blob instead of malloc'ing.
      */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;
  /*
    Hint the CPU to pull the cache line following the start of the nexus
    pixels: a read prefetch for ReadMode, a write prefetch otherwise.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  /*
    Point the nexus at the requested region: directly into the cache when the
    region is in memory and row-contiguous, otherwise at a private staging
    buffer sized for the region.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  assert(nexus_info->signature == MagickCoreSignature);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access requires a region that is contiguous in memory: either
        full-width rows starting at x == 0, or a single partial row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;
          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,cache_info->columns)*
    cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  /*
    Reuse the existing staging buffer when it is large enough; otherwise
    (re)allocate.
  */
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /*
    Metacontent, if any, is stored immediately after the pixel data in the
    staging buffer.
  */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Set the alpha component of every pixel to `alpha` and mark the image as
  blended; used below when a virtual-pixel method requires an alpha channel.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  CacheView
    *magick_restrict image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /*
      Rows are processed in parallel; a failed row flips `status` and later
      iterations bail out early.
    */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Install `virtual_pixel_method` on the image's pixel cache and return the
    previous setting.  Virtual pixels are those accessed outside the image
    boundaries.
  */
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  /* Some methods need extra cache preparation on non-empty images. */
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Enable alpha when the background color carries alpha and the
           image does not yet; leave gray colorspace when the background
           color itself is not gray. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Transparent virtual pixels require an alpha channel. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Flush any pending OpenCL work for a memory-resident cache into host
    memory.  A no-op for non-memory caches or when no OpenCL cache info
    is attached.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == MemoryCache)
    if (cache_info->opencl != (MagickCLCacheInfo) NULL)
      {
        /*
          Ensure single threaded access to OpenCL environment.
        */
        LockSemaphoreInfo(cache_info->semaphore);
        cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
        UnlockSemaphoreInfo(cache_info->semaphore);
      }
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Make sure all OpenCL operations on the image's pixel cache have
    completed and the host memory reflects their result.
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Save the authentic pixels of the nexus back to the in-memory or disk
    cache.  Returns MagickTrue if the pixel region was synced, MagickFalse
    otherwise.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply write/composite masks unless a mask update is itself in progress. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* Nexus aliases the cache pixels directly: nothing to copy back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* Meta-content (if any) must also be written for the sync to succeed. */
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save this thread's authentic pixel nexus to the in-memory or disk
    cache.  Returns MagickTrue if the pixel region was synced.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache.  Delegates to a
    custom sync handler when one is installed on the cache methods;
    otherwise syncs this thread's nexus directly.  Returns MagickTrue if
    the pixel region was flushed.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler ==
      (SyncAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
        exception));
    }
  return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache by (re)acquiring
    the image pixel cache.  Returns MagickTrue when the cache could be
    obtained, MagickFalse otherwise.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the meta-content of the nexus region back to the pixel cache
    backing store (memory, memory-mapped file, disk, or distributed cache).
    Returns MagickTrue if the region was written, MagickFalse otherwise.

    Fix: the DistributedCache case passed the mojibake token `®ion`
    (an HTML-entity-mangled `&region`); restored the address-of expression.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus aliases the cache meta-content directly: nothing to copy back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Pixel offset of the nexus origin within the cache, and the byte length
    of one nexus row of meta-content.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse the copy into a single memcpy. */
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the meta-content follows all pixel data; `extent` is reused
        here as the total pixel count of the image.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* too big for one request: write row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early break above indicates a failed row write. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Write the pixels of the nexus region back to the pixel cache backing
    store (memory, memory-mapped file, disk, or distributed cache).
    Returns MagickTrue if the region was written, MagickFalse otherwise.

    Fix: the DistributedCache case passed the mojibake token `®ion`
    (an HTML-entity-mangled `&region`); restored the address-of expression.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the cache pixels directly: nothing to copy back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Pixel offset of the nexus origin within the cache, and the byte length
    of one nexus row of pixels.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse the copy into a single memcpy. */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* too big for one request: write row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early break above indicates a failed row write. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
wave_billiard.c | /*********************************************************************************/
/* */
/* Animation of wave equation in a planar domain */
/* */
/* N. Berglund, december 2012, may 2021 */
/* */
/* UPDATE 24/04: distinction between damping and "elasticity" parameters */
/* UPDATE 27/04: new billiard shapes, bug in color scheme fixed */
/* UPDATE 28/04: code made more efficient, with help of Marco Mancini */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o wave_billiard wave_billiard.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_wave */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the wave equation is highly parallelizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
#define NX 1280 /* number of grid points on x axis */
#define NY 720 /* number of grid points on y axis */
// #define NX 640 /* number of grid points on x axis */
// #define NY 360 /* number of grid points on y axis */
/* setting NX to WINWIDTH and NY to WINHEIGHT increases resolution */
/* but will multiply run time by 4 */
#define XMIN -2.0
#define XMAX 2.0 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 1.1 /* scaling for Julia sets */
/* Choice of the billiard table */
#define B_DOMAIN 16 /* choice of domain shape */
#define D_RECTANGLE 0 /* rectangular domain */
#define D_ELLIPSE 1 /* elliptical domain */
#define D_STADIUM 2 /* stadium-shaped domain */
#define D_SINAI 3 /* Sinai billiard */
#define D_DIAMOND 4 /* diamond-shaped billiard */
#define D_TRIANGLE 5 /* triangular billiard */
#define D_FLAT 6 /* flat interface */
#define D_ANNULUS 7 /* annulus */
#define D_POLYGON 8 /* polygon */
#define D_YOUNG 9 /* Young diffraction slits */
#define D_GRATING 10 /* diffraction grating */
#define D_EHRENFEST 11 /* Ehrenfest urn type geometry */
#define D_MENGER 15 /* Menger-Sierpinski carpet */
#define D_JULIA_INT 16 /* interior of Julia set */
/* Billiard tables for heat equation */
#define D_ANNULUS_HEATED 21 /* annulus with different temperatures */
#define D_MENGER_HEATED 22 /* Menger gasket with different temperatures */
#define D_MENGER_H_OPEN 23 /* Menger gasket with different temperatures and larger domain */
#define D_MANDELBROT 24 /* Mandelbrot set */
#define D_JULIA 25 /* Julia set */
#define D_MANDELBROT_CIRCLE 26 /* Mandelbrot set with circular conductor */
#define LAMBDA 1.0 /* parameter controlling the dimensions of domain */
#define MU 0.05 /* parameter controlling the dimensions of domain */
#define NPOLY 8 /* number of sides of polygon */
#define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 4 /* depth of computation of Menger gasket */
#define MRATIO 3 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard below */
/* Physical parameters of wave equation */
#define OMEGA 0.9 /* frequency of periodic excitation */
#define COURANT 0.01 /* Courant number */
#define GAMMA 0.0 /* damping factor in wave equation */
// #define GAMMA 5.0e-10 /* damping factor in wave equation */
#define KAPPA 0.0 /* "elasticity" term enforcing oscillations */
// #define KAPPA 5.0e-6 /* "elasticity" term enforcing oscillations */
// #define KAPPA 5.0e-9 /* "elasticity" term enforcing oscillations */
// #define KAPPA 5.0e-8 /* "elasticity" term enforcing oscillations */
/* The Courant number is given by c*DT/DX, where DT is the time step and DX the lattice spacing */
/* The physical damping coefficient is given by GAMMA/(DT)^2 */
/* Increasing COURANT speeds up the simulation, but decreases accuracy */
/* For similar wave forms, COURANT^2*GAMMA should be kept constant */
/* Boundary conditions */
#define B_COND 2
#define BC_DIRICHLET 0 /* Dirichlet boundary conditions */
#define BC_PERIODIC 1 /* periodic boundary conditions */
#define BC_ABSORBING 2 /* absorbing boundary conditions (beta version) */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
/* Parameters for length and speed of simulation */
#define NSTEPS 4000 /* number of frames of movie */
#define NVID 25 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define PAUSE 1000 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 1 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
/* Color schemes */
#define BLACK 1 /* background */
#define COLOR_SCHEME 1 /* choice of color scheme */
#define C_LUM 0 /* color scheme modifies luminosity (with slow drift of hue) */
#define C_HUE 1 /* color scheme modifies hue */
#define SCALE 0 /* set to 1 to adjust color scheme to variance of field */
#define SLOPE 1.0 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
#define HUEMEAN 230.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP 50.0 /* amplitude of variation of hue for color scheme C_HUE */
// #define HUEMEAN 320.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP 100.0 /* amplitude of variation of hue for color scheme C_HUE */
/* Basic math */
#define PI 3.141592654
#define DPI 6.283185307
#define PID 1.570796327
double julia_x = -0.5, julia_y = 0.5; /* parameters for Julia sets */
// double julia_x = 0.33267, julia_y = 0.06395; /* parameters for Julia sets */
// double julia_x = 0.37468, julia_y = 0.21115; /* parameters for Julia sets */
#include "sub_wave.c"
double courant2; /* Courant parameter squared */
void init_wave(x, y, phi, psi, xy_in)
/* initialise field with drop at (x,y) - phi is wave height, psi is phi at time t-1 */
double x, y, *phi[NX], *psi[NX]; short int * xy_in[NX];
{
    int i, j;
    double xy[2], r2;

    /* Visit every lattice site, record whether it lies inside the billiard,
       and deposit a Gaussian-modulated radial ripple centred on (x,y).
       The previous time slice psi starts at rest. */
    for (i=0; i<NX; i++)
    {
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
            r2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
            phi[i][j] = 0.2*exp(-r2/0.001)*cos(-sqrt(r2)/0.01);
            psi[i][j] = 0.0;
        }
    }
}
void init_wave_flat(phi, psi, xy_in)
/* initialise flat field - phi is wave height, psi is phi at time t-1 */
double *phi[NX], *psi[NX]; short int * xy_in[NX];
{
    int i, j;
    double xy[2];

    /* Zero both time slices everywhere and record the billiard mask. */
    for (i=0; i<NX; i++)
    {
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
            phi[i][j] = 0.0;
            psi[i][j] = 0.0;
        }
    }
}
void add_drop_to_wave(factor, x, y, phi, psi)
/* add drop at (x,y) to the field with given prefactor */
double factor, x, y, *phi[NX], *psi[NX];
{
    int i, j;
    double xy[2], r2;

    /* Superpose a scaled ripple centred on (x,y) onto the current height
       field; the previous slice psi is left untouched. */
    for (i=0; i<NX; i++)
    {
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            r2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
            phi[i][j] += 0.2*factor*exp(-r2/0.001)*cos(-sqrt(r2)/0.01);
        }
    }
}
void oscillate_linear_wave(amplitude, t, x1, y1, x2, y2, phi, psi)
/* oscillating boundary condition at (x,y) */
double amplitude, t, x1, y1, x2, y2, *phi[NX], *psi[NX];
{
    int i, j, ij1[2], ij2[2], imin, imax, jmin, jmax, d = 5;
    double xy[2], dist2;

    /* Drive a time-harmonic source along the segment (x1,y1)-(x2,y2):
       the segment endpoints are converted to lattice indices, padded by
       d grid cells and clamped to the grid, then cells close enough are
       overwritten each call. */
    xy_to_ij(x1, y1, ij1);
    xy_to_ij(x2, y2, ij2);
    imin = ij1[0] - d; if (imin < 0) imin = 0;
    imax = ij2[0] + d; if (imax >= NX) imax = NX-1;
    jmin = ij1[1] - d; if (jmin < 0) jmin = 0;
    jmax = ij2[1] + d; if (jmax >= NY) jmax = NY-1;
    for (i = imin; i < imax; i++)
        for (j = jmin; j < jmax; j++)
        {
            ij_to_xy(i, j, xy);
            /* NOTE(review): only the horizontal distance to x1 is used, so
               this effectively drives a vertical line at x = x1 - the
               author's own comment flags it as provisional. */
            dist2 = (xy[0]-x1)*(xy[0]-x1); /* to be improved */
//             dist2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
//             if (dist2 < 0.01)
            if (dist2 < 0.001)
                phi[i][j] = amplitude*exp(-dist2/0.001)*cos(-sqrt(dist2)/0.01)*cos(t*OMEGA);
//                 phi[i][j] += 0.2*exp(-dist2/0.001)*cos(-sqrt(dist2)/0.01)*cos(t*OMEGA);
        }
}
/*********************/
/* animation part */
/*********************/
void draw_wave(phi, psi, xy_in, scale, time)
/* draw the field */
double *phi[NX], *psi[NX], scale;
short int *xy_in[NX];
int time;
{
    int i, j;
    double rgb[3], xy[2], x1, y1, x2, y2;

    /* Render each lattice cell inside the billiard as a unit quad coloured
       from the wave height phi[i][j]; cells outside the billiard are
       skipped.  `scale` normalises the colour mapping and `time` lets the
       colour scheme drift.  psi and x1..y2 are currently unused here. */
    glBegin(GL_QUADS);
    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            if (xy_in[i][j])
            {
                color_scheme(COLOR_SCHEME, phi[i][j], scale, time, rgb);
                glColor3f(rgb[0], rgb[1], rgb[2]);
                glVertex2i(i, j);
                glVertex2i(i+1, j);
                glVertex2i(i+1, j+1);
                glVertex2i(i, j+1);
            }
        }
    glEnd ();
}
void evolve_wave_half(phi_in, psi_in, phi_out, psi_out, xy_in)
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
double *phi_in[NX], *psi_in[NX], *phi_out[NX], *psi_out[NX]; short int *xy_in[NX];
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta, x, y, c, cc;

    c = COURANT;
    cc = courant2;

    /* Every grid point only reads phi_in/psi_in and writes phi_out/psi_out,
       so the sweep parallelises without races. */
    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j]){
                /* discretized Laplacian */
                /* Neighbour indices: clamped at the edges for Dirichlet and
                   absorbing conditions, wrapped around for periodic ones. */
                if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING))
                {
                    iplus = (i+1); if (iplus == NX) iplus = NX-1;
                    iminus = (i-1); if (iminus == -1) iminus = 0;
                    jplus = (j+1); if (jplus == NY) jplus = NY-1;
                    jminus = (j-1); if (jminus == -1) jminus = 0;
                }
                else if (B_COND == BC_PERIODIC)
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }
                delta = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];
                x = phi_in[i][j];
                y = psi_in[i][j];
                /* evolve phi */
                /* Leapfrog update with elasticity (KAPPA) and damping (GAMMA). */
                if (B_COND != BC_ABSORBING) phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - GAMMA*(x-y);
                else
                {
                    /* Interior points: standard update; border points: a
                       one-sided outgoing-wave (absorbing) approximation. */
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                        phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - GAMMA*(x-y);
                    /* upper border */
                    else if (j==NY-1)
                        phi_out[i][j] = x - c*(x - phi_in[i][NY-2]) - KAPPA*x - GAMMA*(x-y);
                    /* lower border */
                    else if (j==0)
                        phi_out[i][j] = x - c*(x - phi_in[i][1]) - KAPPA*x - GAMMA*(x-y);
                    /* right border */
                    /* NOTE(review): plain `if` (not `else if`), so corner
                       cells on the left/right edges overwrite the value set
                       by the top/bottom branch - confirm this is intended. */
                    if (i==NX-1)
                        phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA*x - GAMMA*(x-y);
                    /* left border */
                    else if (i==0)
                        phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA*x - GAMMA*(x-y);
                }
                /* The new "previous" slice is the old current slice. */
                psi_out[i][j] = x;
                /* Optional amplitude clamp, for debugging runaway solutions. */
                if (FLOOR)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                    if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                    if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
                }
            }
        }
    }
//     printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
void evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in)
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX]; short int *xy_in[NX];
{
    /* Two ping-pong half steps: the first writes into the scratch buffers,
       the second writes back into (phi, psi), so one call advances the
       field by two time steps without any extra copying. */
    evolve_wave_half(phi, psi, phi_tmp, psi_tmp, xy_in);
    evolve_wave_half(phi_tmp, psi_tmp, phi, psi, xy_in);
}
void old_evolve_wave(phi, psi, xy_in)
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
double *phi[NX], *psi[NX]; short int *xy_in[NX];
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta, x, y;

    /* Legacy in-place integrator kept for reference: assumes periodic
       boundary conditions and updates phi/psi directly.  Superseded by
       evolve_wave/evolve_wave_half, which use scratch buffers.
       NOTE(review): updating phi in place while neighbouring cells are
       read in parallel makes the result order-dependent - presumably why
       it was replaced; confirm before reusing. */
    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j]){
                /* discretized Laplacian */
                iplus = (i+1) % NX;
                iminus = (i-1) % NX;
                if (iminus < 0) iminus += NX;
                jplus = (j+1) % NY;
                jminus = (j-1) % NY;
                if (jminus < 0) jminus += NY;
                delta = phi[iplus][j] + phi[iminus][j] + phi[i][jplus] + phi[i][jminus] - 4.0*phi[i][j];
                x = phi[i][j];
                y = psi[i][j];
                /* evolve phi */
                phi[i][j] = -y + 2*x + courant2*delta - KAPPA*x - GAMMA*(x-y);
                /* Old versions of the simulation used this: */
//                 phi[i][j] = (-psi[i][j] + 2*phi[i][j] + courant2*delta)*damping;
//                 where damping = 1.0 - 0.0001;
                psi[i][j] = x;
                if (FLOOR)
                {
                    if (phi[i][j] > VMAX) phi[i][j] = VMAX;
                    if (phi[i][j] < -VMAX) phi[i][j] = -VMAX;
                    if (psi[i][j] > VMAX) psi[i][j] = VMAX;
                    if (psi[i][j] < -VMAX) psi[i][j] = -VMAX;
                }
            }
        }
    }
//     printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
double compute_variance(phi, psi, xy_in)
/* compute the variance of the field, to adjust color scheme */
double *phi[NX], *psi[NX]; short int * xy_in[NX];
{
    int i, j, n = 0;
    double variance = 0.0;

    /* NOTE(review): this is the mean square of phi over in-billiard cells
       (the mean is not subtracted), and the loops start at index 1, so row 0
       and column 0 are skipped - confirm both are intentional.  psi is
       unused. */
    for (i=1; i<NX; i++)
        for (j=1; j<NY; j++)
        {
            if (xy_in[i][j])
            {
                n++;
                variance += phi[i][j]*phi[i][j];
            }
        }
    /* guard against an empty domain to avoid dividing by zero */
    if (n==0) n=1;
    return(variance/(double)n);
}
void animation()
{
    /* Top-level simulation driver: allocate the field arrays, initialise
       the wave, then alternate rendering one frame with NVID integration
       calls (each advancing two time steps), for NSTEPS frames; optionally
       save each frame to disk for movie assembly. */
    double time, scale;
    double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX];
    short int *xy_in[NX];
    int i, j, s;

    /* Since NX and NY are big, it seemed wiser to use some memory allocation here */
    for (i=0; i<NX; i++)
    {
        phi[i] = (double *)malloc(NY*sizeof(double));
        psi[i] = (double *)malloc(NY*sizeof(double));
        phi_tmp[i] = (double *)malloc(NY*sizeof(double));
        psi_tmp[i] = (double *)malloc(NY*sizeof(double));
        xy_in[i] = (short int *)malloc(NY*sizeof(short int));
    }
    /* precompute the squared Courant number used by the integrator */
    courant2 = COURANT*COURANT;
    /* initialize wave with a drop at one point, zero elsewhere */
//     init_wave_flat(phi, psi, xy_in);
    init_wave(0.0, 0.0, phi, psi, xy_in);
    /* add a drop at another point */
//     add_drop_to_wave(1.0, 0.7, 0.0, phi, psi);
//     add_drop_to_wave(1.0, -0.7, 0.0, phi, psi);
//     add_drop_to_wave(1.0, 0.0, -0.7, phi, psi);
    /* draw the initial state once before the time loop */
    blank();
    glColor3f(0.0, 0.0, 0.0);
    draw_wave(phi, psi, xy_in, 1.0, 0);
    draw_billiard();
    glutSwapBuffers();
    sleep(SLEEP1);
    for (i=0; i<=NSTEPS; i++)
    {
        //printf("%d\n",i);
        /* compute the variance of the field to adjust color scheme */
        /* the color depends on the field divided by sqrt(1 + variance) */
        if (SCALE)
        {
            scale = sqrt(1.0 + compute_variance(phi,psi, xy_in));
//             printf("Scaling factor: %5lg\n", scale);
        }
        else scale = 1.0;
//         /* TO BE ADAPTED */
//         scale = 1.0;
        draw_wave(phi, psi, xy_in, scale, i);
        /* advance the field between rendered frames */
        for (j=0; j<NVID; j++)
        {
            evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in);
//             if (i % 10 == 9) oscillate_linear_wave(0.2*scale, 0.15*(double)(i*NVID + j), -1.5, YMIN, -1.5, YMAX, phi, psi);
        }
//         for (j=0; j<NVID; j++) evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in);
        draw_billiard();
        glutSwapBuffers();
        if (MOVIE)
        {
            save_frame();
            /* it seems that saving too many files too fast can cause trouble with the file system */
            /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
            if (i % PAUSE == PAUSE - 1)
            {
                printf("Making a short pause\n");
                sleep(PSLEEP);
                s = system("mv wave*.tif tif_wave/");
            }
        }
    }
    if (MOVIE)
    {
        /* hold the final frame for a while, then move remaining frames */
        for (i=0; i<20; i++) save_frame();
        s = system("mv wave*.tif tif_wave/");
    }
    /* release the per-column buffers allocated above */
    for (i=0; i<NX; i++)
    {
        free(phi[i]);
        free(psi[i]);
        free(phi_tmp[i]);
        free(psi_tmp[i]);
        free(xy_in[i]);
    }
}
/*
  GLUT display callback: clear both halves of the double buffer, run the
  animation once, pause, then destroy the window (single-run program).
*/
void display(void)
{
    glPushMatrix();

    /* clear both front and back buffers before drawing */
    blank();
    glutSwapBuffers();
    blank();
    glutSwapBuffers();

    animation();
    sleep(SLEEP2);

    glPopMatrix();

    /* close the window once the animation has completed */
    glutDestroyWindow(glutGetWindow());
}
/*
  Entry point: initialise GLUT, create the rendering window, register the
  display callback and hand control to the GLUT main loop.
*/
int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(WINWIDTH,WINHEIGHT);
    glutCreateWindow("Wave equation in a planar domain");

    init();
    glutDisplayFunc(display);
    glutMainLoop();

    return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/* Channel-data compression methods defined by the PSD file format. */
typedef enum
{
  Raw = 0,                   /* uncompressed scanlines */
  RLE = 1,                   /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,  /* zlib deflate */
  ZipWithPrediction = 3      /* zlib deflate of per-row byte deltas */
} PSDCompressionType;
/* Color modes stored in the PSD file header (values 5 and 6 are unused here). */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/* One channel record from a PSD layer: channel id and its data length. */
typedef struct _ChannelInfo
{
  short
    type;  /* channel id: 0..n color planes; negative ids are alpha/mask
              channels (see SetPSDPixel / ReadPSDChannel) */

  size_t
    size;  /* channel data length in bytes — presumably includes the 2-byte
              compression marker (ReadPSDChannel seeks size-2); confirm */
} ChannelInfo;
/* A layer's opacity mask: its pixels, placement and PSD mask attributes. */
typedef struct _MaskInfo
{
  Image
    *image;      /* decoded mask pixels */

  RectangleInfo
    page;        /* mask position and extent relative to the canvas */

  unsigned char
    background,  /* mask background value (preserved in the registry key) */
    flags;       /* PSD mask flag bits; values > 2 are treated as unsupported */
} MaskInfo;
/* Everything parsed from one PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel id and byte length */

  char
    blendkey[4];                   /* 4-byte blend-mode key, e.g. "norm" */

  Image
    *image;                        /* decoded layer pixels */

  MaskInfo
    mask;                          /* optional layer mask */

  Quantum
    opacity;                       /* layer opacity */

  RectangleInfo
    page;                          /* layer position on the canvas */

  size_t
    offset_x,                      /* layer offsets */
    offset_y;

  unsigned char
    clipping,                      /* clipping flag */
    flags,                         /* layer flag bits */
    name[257],                     /* layer name (NUL-terminated) */
    visible;                       /* visibility flag */

  unsigned short
    channels;                      /* number of valid channel_info entries */

  StringInfo
    *info;                         /* additional layer information blob */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map an ImageMagick composite operator to the 4-byte PSD blend-mode key.
  The key bytes are written reversed when the output blob is little-endian.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  static const struct
  {
    CompositeOperator
      compose;

    const char
      *le_key,  /* byte-reversed key for LSBEndian output */
      *be_key;  /* natural (big-endian) key */
  } blend_modes[] =
  {
    { ColorBurnCompositeOp, "vidi", "idiv" },
    { ColorDodgeCompositeOp, " vid", "div " },
    { ColorizeCompositeOp, "rloc", "colr" },
    { DarkenCompositeOp, "krad", "dark" },
    { DifferenceCompositeOp, "ffid", "diff" },
    { DissolveCompositeOp, "ssid", "diss" },
    { ExclusionCompositeOp, "dums", "smud" },
    { HardLightCompositeOp, "tiLh", "hLit" },
    { HardMixCompositeOp, "xiMh", "hMix" },
    { HueCompositeOp, " euh", "hue " },
    { LightenCompositeOp, "etil", "lite" },
    { LinearBurnCompositeOp, "nrbl", "lbrn" },
    { LinearDodgeCompositeOp, "gddl", "lddg" },
    { LinearLightCompositeOp, "tiLl", "lLit" },
    { LuminizeCompositeOp, " mul", "lum " },
    { MultiplyCompositeOp, " lum", "mul " },
    { OverlayCompositeOp, "revo", "over" },
    { PinLightCompositeOp, "tiLp", "pLit" },
    { SaturateCompositeOp, " tas", "sat " },
    { ScreenCompositeOp, "nrcs", "scrn" },
    { SoftLightCompositeOp, "tiLs", "sLit" },
    { VividLightCompositeOp, "tiLv", "vLit" }
  };

  register size_t
    i;

  for (i=0; i < (sizeof(blend_modes)/sizeof(blend_modes[0])); i++)
    if (image->compose == blend_modes[i].compose)
      return(image->endian == LSBEndian ? blend_modes[i].le_key :
        blend_modes[i].be_key);
  /* OverCompositeOp and any unrecognised operator map to "normal" */
  return(image->endian == LSBEndian ? "mron" : "norm");
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* only sRGB images with blended alpha need correction */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  /* honour an explicit "psd:alpha-unblend=off" option */
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* fully transparent or fully opaque pixels are left untouched */
      if (gamma != 0.0 && gamma != 1.0)
        {
          /* un-blend against white: c' = (c - (1-a)*QuantumRange) / a */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Translate a PSD compression tag into the generic CompressionType.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Scale every pixel's alpha by the layer opacity; with revert set, divide it
  back out (used so the original alpha can be restored before writing).
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);  /* fully opaque layer: nothing to scale */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)  /* avoid division by zero on revert */
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Multiply the image's alpha by the intensity of its layer mask (or divide
  it back out when revert is set).  The mask is composited onto a clone of
  the image first so it covers the full canvas at the right offset.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);  /* no alpha channel: nothing to mask */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  /* build a full-canvas mask filled with the mask background value */
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)  /* avoid division by zero on revert */
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash a layer's opacity-mask image in the image registry under a random
  key, and record that key (plus the mask background byte at key[8]) in the
  "psd:opacity-mask" artifact so the writer can recover the mask later.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Bug fix: the key was requested as only 2+1 bytes, yet key[8] and key[9]
    are written below — an out-of-bounds write past the requested length.
    Request ten bytes so indices 8 and 9 are inside the buffer.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;  /* background byte rides in the key */
  key[9]='\0';
  /* translate the mask page into canvas coordinates before registering */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode a PackBits-style RLE stream of compact_pixels into pixels, expanding
  sub-byte depths (1, 2 or 4 bits) into one byte per sample; any other depth
  copies bytes verbatim.  Returns the number of output samples produced,
  stopping early if either buffer would be exceeded.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    value;

  register ssize_t
    i,
    k;

  size_t
    run;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;  /* 128 is a no-op code in PackBits */
    if (run > 128)
      {
        /*
          Replicate packet: the next byte is repeated 257-run times.
        */
        run=256-run+1;
        CheckNumberCompactPixels;
        value=(*compact_pixels++);
        for (k=0; k < (ssize_t) run; k++)
        {
          switch (depth)
          {
            case 1:
            {
              ssize_t
                bit;

              CheckNumberPixels(8);
              /* expand MSB first; a set bit maps to 0, a clear bit to 255 */
              for (bit=7; bit >= 0; bit--)
                *pixels++=((value >> bit) & 0x01) ? 0U : 255U;
              break;
            }
            case 2:
            {
              ssize_t
                shift;

              CheckNumberPixels(4);
              for (shift=6; shift >= 0; shift-=2)
                *pixels++=(unsigned char) ((value >> shift) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((value >> 4) & 0xff);
              *pixels++=(unsigned char) (value & 0x0f);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) value;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal packet: the next run+1 bytes are copied (expanded per depth).
    */
    run++;
    for (k=0; k < (ssize_t) run; k++)
    {
      CheckNumberCompactPixels;
      value=(*compact_pixels++);
      switch (depth)
      {
        case 1:
        {
          ssize_t
            bit;

          CheckNumberPixels(8);
          for (bit=7; bit >= 0; bit--)
            *pixels++=((value >> bit) & 0x01) ? 0U : 255U;
          break;
        }
        case 2:
        {
          ssize_t
            shift;

          CheckNumberPixels(4);
          for (shift=6; shift >= 0; shift-=2)
            *pixels++=(unsigned char) ((value >> shift) & 0x03);
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(unsigned char) ((value >> 4) & 0xff);
          *pixels++=(unsigned char) (value & 0x0f);
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(unsigned char) value;
          break;
        }
      }
    }
  }
  return(i);
}
/*
  Release every per-layer image, mask image and extra-info blob, then the
  layer array itself.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  register ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer = layer_info+j;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per channel sample: colormapped images with more than 256 entries
  need 16-bit indexes; otherwise the size follows the image depth.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
/*
  Read a length field: 4 bytes for PSD (version 1), 8 bytes for PSB.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes per scanline; 1-bit images pack 8 samples per byte (rounded up).
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  samples=image->columns;
  if (image->depth == 1)
    samples=(samples+7)/8;
  return(samples*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode, for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  register size_t
    i;

  for (i=0; i < (sizeof(modes)/sizeof(modes[0])); i++)
    if (modes[i].mode == type)
      return(modes[i].name);
  return("unknown");
}
/*
  Negate every channel except alpha, restoring the channel mask afterwards
  (PSD stores CMYK data inverted).
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Copy the PSD image-resource section into an "8bim" profile and scan its
  8BIM blocks for settings the decoder needs: resolution (id 0x03ed) and
  whether a merged image is present (id 0x0421).  Returns the profile, or
  NULL when the section is too short or the profile cannot be allocated.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;  /* scratch for fields we read but ignore */

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);  /* bug fix: avoid NULL dereference below */
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;  /* Pascal name is padded to an even byte count */
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* reject blocks whose data would run outside the buffer */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* version info: byte 4 flags whether a merged image is stored */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    if ((offset & 0x01) != 0)
      p++;  /* block data is padded to an even length */
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Read length bytes into p; on non-MSB blobs the bytes arrive reversed, so
  mirror the buffer in place.  Returns the byte count actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char swap = *head;
        *head++ = *tail;
        *tail-- = swap;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel at q.  `type` selects the target
  channel: 0,1,2 map to red/green/blue (or the colormap index for PseudoClass
  images), 3 is black for CMYK (otherwise alpha), 4 is alpha, -1 is alpha,
  and -2..-4 are the variants used when reading alpha/mask channels.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      /* clamp the sample to a valid colormap slot */
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      /* non-index channels update the colormap entry's alpha */
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* NOTE(review): sRGB-compatible images with >3 channels skip this
         slot — presumably alpha was already stored via type 3; confirm */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
/*
  Decode one scanline of raw channel samples into image row `row`.  Samples
  are 1, 2 or 4 bytes wide (see GetPSDPacketSize); for 1-bit images the
  bytes arrive packed eight pixels each and are expanded here.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          /* 32-bit samples are big-endian floats scaled to the quantum range */
          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit: expand up to 8 packed pixels from this byte, MSB first;
           a set bit maps to 0, a clear bit to QuantumRange */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* step back one so the outer loop's x++ lands on the next pixel */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one raw scanline from the blob per image row.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    row;

  unsigned char
    *row_pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_pixels=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_pixels));
  if (row_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row_pixels,0,row_size*sizeof(*row_pixels));
  status=MagickTrue;
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    if (ReadBlob(image,row_size,row_pixels) != (ssize_t) row_size)
      {
        status=MagickFalse;  /* truncated blob */
        break;
      }
    status=ReadPSDChannelPixels(image,channels,row,type,row_pixels,exception);
    if (status == MagickFalse)
      break;
  }
  row_pixels=(unsigned char *) RelinquishMagickMemory(row_pixels);
  return(status);
}
/*
  Read the per-row compressed byte counts that precede RLE channel data:
  16-bit values in PSD (version 1), 32-bit in PSB.  Returns NULL on
  allocation failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  register ssize_t
    i;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE-compressed channel: each row's compressed bytes (lengths in
  sizes[]) are read, decoded with DecodePSDPixels and stored into the image.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the input buffer from the largest per-row compressed length */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth 1 is passed as an out-of-band value (123456) so that
       DecodePSDPixels' default case copies the packed bytes verbatim,
       matching the packed row size from GetPSDRowSize */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo horizontal delta prediction on 8-bit samples: each byte becomes the
  running sum of the bytes to its left within the same row.
  NOTE(review): assumes count is a whole multiple of row_size and
  image->columns > 0 — confirm against the ZIP reader that calls this.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  register unsigned char
    *q;

  size_t
    cols,
    left;

  q=pixels;
  left=count;
  while (left > 0)
  {
    for (cols=image->columns; --cols; q++)
      q[1]+=q[0];
    q++;
    left-=row_size;
  }
}
/*
  Undo horizontal delta prediction on big-endian 16-bit samples: each value
  is summed with its left neighbour, propagating the carry from the low
  bytes into the high byte.
  NOTE(review): assumes count is a whole multiple of row_size and
  image->columns > 0 — confirm against the ZIP reader that calls this.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  register unsigned char
    *q;

  size_t
    cols,
    left;

  q=pixels;
  left=count;
  while (left > 0)
  {
    for (cols=image->columns; --cols; q+=2)
    {
      q[2]+=q[0]+((q[1]+q[3]) >> 8);  /* high byte plus carry */
      q[3]+=q[1];                     /* low byte */
    }
    q+=2;
    left-=row_size;
  }
}
/*
  Undo delta prediction for 32-bit samples: first integrate the byte deltas
  across each row, then interleave the row's four byte planes into
  big-endian 32-bit samples in output_pixels (the caller passes
  row_size = 4*image->columns).
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  register unsigned char
    *p,
    *q;

  register ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* offsets of the 2nd, 3rd and 4th byte planes within one row */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    /* sum the byte deltas left-to-right across the whole row */
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    p=start;
    /* gather one byte per plane to rebuild each 32-bit sample */
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  Read a ZIP-compressed channel: inflate compact_size bytes from the blob,
  optionally undo delta prediction per the compression type, then store the
  decoded scanlines into the image.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  register ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* a compressed payload larger than the whole blob is corrupt */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /* inflate the deflate stream into the uncompressed pixel buffer */
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Bug fix: the original tested `pixels` (already known non-NULL)
            instead of the fresh `output_pixels` allocation, so an
            out-of-memory here would be dereferenced below.
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  /* hand one decoded row at a time to the pixel writer */
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read the pixel data of one channel of a PSD layer.  Depending on the
  channel type this decodes either into the layer image itself or into a
  freshly cloned grayscale mask image that is attached to the layer.
  Returns MagickTrue on success; on decode failure it destroys any
  partially built mask and throws CoderError.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  /* channel types < -1 carry layer-mask data rather than color data */
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /*
            Skip the channel payload; the 2-byte compression marker was
            already consumed by the caller (see ReadPSDLayer), hence size-2.
          */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      /* decode the mask into its own grayscale image */
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  /* remember the channel start so we can reposition precisely afterwards */
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* per-row compressed byte counts precede the RLE data */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      /* size-2: compression marker was already read by the caller */
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /*
    Always reposition to the end of this channel's payload so a short or
    failed decode cannot desynchronize the parse of subsequent channels.
  */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* transfer ownership of the decoded mask to the layer */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Read all channels of a single PSD layer into layer_info->image, then
  apply the layer opacity, CMYK negation, and any opacity mask.  The
  layer's page offsets and opacity are exposed as psd:layer.* artifacts
  and its name as the "label" property.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  /* map the 4-char PSD blend key onto a composite operator */
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel's payload is preceded by a 2-byte compression marker */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* channel type -1 is the layer's alpha channel */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Validate that a layer declares every channel the color mode requires.
  A bit mask of required channels is built from min_channels and each
  declared channel clears its bit; the layer is valid when every required
  bit has been satisfied (optionally with one extra alpha channel).
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    /* indexed images must declare their index channel first */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        required|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;  /* mask channels do not satisfy color requirements */
    switch (type)
    {
      case 0:
        required&=~RedChannel;
        break;
      case 1:
        required&=~GreenChannel;
        break;
      case 2:
        required&=~BlueChannel;
        break;
      case 3:
        required&=~BlackChannel;
        break;
      default:
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  /* only alpha left unsatisfied: acceptable when a spare channel exists */
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Attach the decoded layer images to the base image as a linked image
  list and release the layer_info array.  Entries whose image is NULL
  (layers that were skipped or failed to allocate) are dropped first.
  Ownership: the layer images are transferred into the image list; the
  layer_info array itself is always freed before returning.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  /*
    Compact out NULL entries with one stable pass.  The previous code
    shifted the whole tail for every hole, which was accidentally O(n^2)
    for images with many empty layers; this produces the same ordering
    in O(n).
  */
  j=0;
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image != (Image *) NULL)
      {
        if (j != i)
          layer_info[j]=layer_info[i];
        j++;
      }
  number_layers=j;
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /*
    Chain the surviving layer images into a doubly linked list and stamp
    each with its page geometry.
  */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  /* hang the list behind the base (merged) image */
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Decide whether the layer at the given 1-based index falls outside the
  scene range requested via image_info.  Skipping only applies when a
  merged composite exists to fall back on and a scene range was given.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index < image_info->scene) ||
      (index > image_info->scene+image_info->number_scenes-1))
    return(MagickTrue);
  return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so infer one from an extra channel beyond
    what the color mode requires.
  */
  has_alpha=MagickFalse;
  if ((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1))
    has_alpha=MagickTrue;
  if ((psd_info->mode == RGBMode) && (psd_info->channels > 3))
    has_alpha=MagickTrue;
  if ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))
    has_alpha=MagickTrue;
  if (has_alpha != MagickFalse)
    image->alpha_trait=BlendPixelTrait;
}
/*
  Scan the layer's additional-information records (signature, 4-char key,
  32-bit big-endian size, payload) for a "luni" (Unicode layer name)
  entry and copy its ASCII subset into layer_info->name.

  Fixes over the previous revision:
    - a "luni" record with size < 4 made the unsigned `size-4` wrap so
      the bounds check always passed and the 4-byte character count could
      be read past the record (and potentially past the buffer);
    - `length*2` was computed in unsigned int and could wrap for a
      hostile count, bypassing the bounds check;
    - the name buffer is now always NUL-terminated, even when the copy
      stops early on a non-ASCII character (previously this relied on
      the caller having zeroed the LayerInfo struct);
    - removed the no-op `size=size & 0xffffffff`.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* big-endian 32-bit payload size */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* a luni record needs at least its 4-byte character count */
        if (size < 4)
          break;
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /* widen before doubling so a huge count cannot wrap the check;
           each UTF-16 unit occupies two bytes of payload */
        if (((size_t) length*2) > (size_t) (size-4))
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported: the high byte of every
             UTF-16BE code unit must be zero */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        /* always terminate, even when the copy stopped early */
        *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
/*
  Determine the size of the layer-info section.  Normally this is the
  size field itself (GetPSDSize); when that is zero the section may be
  wrapped in an 8BIM additional-information record (Lr16/Lr32 for 16/32
  bit layers, optionally preceded by a merged-transparency record).
  Returns 0 when no parsable layer-info section is present.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  /* discard the (zero) global layer-mask-info length before the records */
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  /* NOTE(review): Mt16/Mt32/Mtrn appear to be merged-transparency
     records whose presence implies an alpha channel — confirm against
     the PSD file-format specification */
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the PSD layer-info section: layer count, then per-layer records
  (bounding box, channel table, blend key, flags, mask info, blending
  ranges, name, additional info), then each layer's channel data.  On
  success the decoded layers are attached to `image` as an image list
  via AttachPSDLayers; on failure all layer resources are destroyed.
  When skip_layers is set only the layer count is consumed (enough to
  learn whether an alpha channel exists).
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      /* no layer section: only decide on merged-image alpha */
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* zero-fill so DestroyLayerInfo is safe on partially parsed entries */
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record (metadata only, no pixel data).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* layer bounding box: top, left, bottom, right */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* channel table: (type, payload size) per channel */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* flag bit 1 set means the layer is hidden */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks bytes consumed of the `size`-byte extra
           data block so the remainder can be handed to ParseAdditionalInfo */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* mask flag bit 0: position is already layer-relative;
               otherwise convert the absolute offsets to layer-relative */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            /* NOTE(review): 18 bytes were consumed above; if length < 18
               the unsigned subtraction wraps to a huge value, so the
               discard fails at EOF and the file is rejected — verify this
               is the intended handling for truncated mask records */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* the Pascal-style name is padded to a multiple of 4 bytes */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* whatever remains of the extra-data block is the layer's
           additional-information records (e.g. the Unicode name) */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      /* ping mode: attach the (pixel-less) layers and stop */
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read the pixel data of every kept layer; skipped layers'
    channel payloads are discarded to keep the blob position in sync.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for reading PSD layers.  Consults the coder security
  policy first; when PSD reads are not authorized it silently succeeds
  without touching the blob.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the flattened (merged) composite at the end of a PSD file into
  `image`.  Only Raw and RLE compression are supported here; Zip-coded
  composites are rejected with a warning.  Channels are stored planar,
  one after another; CMYK negation and alpha-blend correction are applied
  afterwards.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* a specific non-first scene was requested: the merged image is scene 0 */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* per-row compressed byte counts for every channel precede the data */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* two-channel grayscale: the second channel is alpha (type -1) */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory is NULL-safe, so no guard is needed */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
else
if (psd_info.mode == IndexedMode)
psd_info.min_channels=1;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write one row-byte-count slot: PSD (version 1) uses 16-bit values,
    PSB (version 2) uses 32-bit values.
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    written;

  /*
    Back-patch a previously reserved offset slot at `offset', then restore
    the current write position so sequential output can continue.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  written=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(written);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Section lengths are 32-bit in PSD (version 1) and 64-bit in PSB.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    written;

  /*
    Back-patch a previously reserved size field at `offset', preserving
    the current write position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  written=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(written);
}
/*
  PSDPackbitsEncodeImage() compresses `length' bytes from `pixels' into
  `compact_pixels' using Packbits run-length encoding: a control byte n in
  [0,127] is followed by n+1 literal bytes, a control byte of (256-n)+1
  announces a run of n identical bytes, and 128 marks end-of-data.
  Returns the number of compressed bytes written (0 on allocation failure).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /*
    Scratch buffer for one literal run: a control byte plus at most 127
    literals (indices 0..127).
  */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a one-byte literal run. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: a two-byte literal run is never longer. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical trailing bytes: packed run of 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise emit the three bytes literally. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run up to the remaining bytes, capped at 127. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until three identical bytes are seen; stop
           early near the buffer end (i-3) or at the 127-byte cap. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* Control byte (count-1) followed by `count' literal bytes. */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    written;

  ssize_t
    slot,
    slots;

  /*
    Emit the 2-byte compression tag and, for RLE, reserve one byte-count
    slot per scanline per channel; WritePSDOffset() patches the
    placeholders later.
  */
  if (compression == RLECompression)
    {
      written=(size_t) WriteBlobShort(image,RLE);
      slots=channels*(ssize_t) next_image->rows;
      for (slot=0; slot < slots; slot++)
        written+=SetPSDOffset(psd_info,image,0);
      return(written);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  WritePSDChannel() exports one quantum channel of `next_image' to the
  blob of `image' using the requested compression.  When `separate' is
  set the channel gets its own compression header here and `size_offset'
  is redirected at the freshly reserved RLE offset table.  Returns the
  number of bytes written (0 on failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Write this channel's own compression tag; the RLE offset table
         starts 2 bytes past the current position. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* Depths above 8 are written as 16-bit samples. */
  if (next_image->depth > 8)
    next_image->depth=16;
  /* 1-bit gray data is bit-inverted before writing (see loop below). */
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Map a -quality of 1..9 onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's byte count into the RLE offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Flush the deflate stream on the final scanline. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  /*
    Allocate a worst-case Packbits output buffer for one scanline;
    samples wider than 8 bits occupy two bytes each.
  */
  bytes_per_packet=(size_t) (image->depth > 8UL ? 2UL : 1UL);
  buffer=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(buffer);
}
/*
  WritePSDChannels() writes all channels of `next_image' (palette indexes
  or gray or RGB/CMYK, plus alpha and, for separate layers, an optional
  opacity mask) to the blob of `image'.  When `separate' is set each
  channel carries its own compression header and its byte length is
  patched into the layer record at `size_offset'; otherwise one shared
  merged-image compression header is written first.  Returns the number
  of bytes written (0 on failure).
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* The per-image compression may be overridden by the image info. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged image: count the channels and write one shared
         compression header whose RLE offset table spans all of them. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Byte size of one channel's slice of the RLE offset table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single channel of colormap indexes. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK is negated for writing and restored again below. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* An opacity mask registered by the layer pass is written as one
         extra grayscale channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length,
    value_length;

  register ssize_t
    i;

  /*
    Write a Pascal string: one length byte followed by at most 255
    characters of `value', then zero padding so the whole field (length
    byte included) is a multiple of `padding'.  Returns the number of
    bytes written.
  */
  count=0;
  value_length=strlen(value);  /* hoisted: previously computed twice */
  length=(value_length > 255UL) ? 255UL : value_length;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() emits the 8BIM resolution resource
  (id 0x03ED): horizontal and vertical resolution as big-endian 16.16
  fixed-point values, each followed by display-unit fields
  (units=2 for pixels/cm input, units=1 otherwise).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to an inch-based 16.16 fixed-point value. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 is added both above and in the cast below, biasing
     the fixed-point result by one unit -- confirm this is intentional. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  /*
    Emit one channel-info record: the 2-byte channel id followed by a
    zeroed length field that WritePSDSize() patches later.  The two
    writes stay as separate statements to preserve output order.
  */
  written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  RemoveICCProfileFromResourceBlock() deletes the ICC profile resource
  (id 0x040F) from an 8BIM resource block, shrinking the StringInfo in
  place.  Blocks shorter than one resource header are left untouched.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    /* Every resource record must start with the "8BIM" signature. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header layout: signature (4), id (2), 2-byte field (presumably the
       empty resource name -- verify against spec), byte count (4). */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Full record size: even-padded payload plus 12 header bytes. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Splice the record out and shrink the profile. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* payloads are padded to an even byte count */
  }
}
/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from an 8BIM resource block so a freshly generated one can
  be written instead; the StringInfo is shrunk in place.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    /* Stop at the first record that lacks the "8BIM" signature. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Even-padded payload size; negative means a corrupt count. */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Close the gap over the 12-byte header plus padded payload. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* payloads are padded to an even byte count */
  }
}
/*
  GetAdditionalInformation() filters the "psd:additional-info" profile
  attached to `image' according to the image option of the same name:
  "all" keeps the profile untouched, any other value except "selective"
  discards it, and "selective" keeps only the whitelisted keys below,
  compacting the profile buffer in place.  Returns the profile to write,
  or NULL when nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Neither "all" nor "selective": drop the profile entirely. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each record: 4-byte signature, 4-byte key, 4-byte big-endian size,
     then `size' bytes of data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated record: bail out */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Unlisted key: splice this record out of the buffer. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Shrink to the kept records and re-attach the filtered profile. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() writes the layer-info section for every image
  in the list after `image' (or for `image' itself when it is alone):
  first a pass emitting each layer's record (bounds, channel table,
  blend mode, opacity, optional mask, name, additional info), then a
  second pass writing the channel data and patching the channel length
  fields reserved in the first pass.  On success the section length is
  back-patched at the reserved offset and MagickTrue is returned;
  `layers_size', when non-NULL, receives the unpadded section size.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Reserve the layer-info section length; patched at the end. */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* The count is written negated when the image has an alpha channel
     (per the PSD format, this flags merged-image transparency). */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer rectangle: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-length fields start so the
       data pass below can patch them. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* alpha channel */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* user mask channel */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);  /* fully opaque by default */
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a generated name such as "L1". */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));  /* name padded to 4 bytes */
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    /* Extra-data length: mask record + blending ranges + name + info. */
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask coordinates are canvas-relative. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The section length is rounded up to an even byte count. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the PSD layer section: when coder
    policy denies write rights for "PSD" the work is skipped and success
    is reported, otherwise the internal writer runs without collecting
    the section size.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception));
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Bytes per pixel: 3 (8-bit RGB) or 6 (16-bit), plus 1 or 2 for alpha.
    NOTE(review): packet_size appears unused below in this function -- confirm.
  */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /*
    Use PSB (version 2) when requested or when the canvas exceeds the PSD
    30000-pixel dimension limit.
  */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  /*
    File header: signature, version, reserved bytes.
  */
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      /*
        Non-CMYK output is normalized to sRGB; otherwise convert to CMYK.
      */
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap: 256 red, 256 green, then 256 blue bytes,
        zero-padded past image->colors.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /*
        Clone the 8BIM profile so the embedded ICC/resolution resources can
        be stripped (they are written separately below).
      */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /*
        ICC profile resource (id 0x040F), padded to an even length.
      */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /*
        Write the layer section with a placeholder size, then patch the real
        size (plus section overhead: 8 bytes for PSD, 12 for PSB) in place.
      */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      /* The composite cannot be Zip-compressed; fall back to RLE. */
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
/* Sampling density for Bezier approximation; presumably consumed by
   TraceBezier() -- confirm in the tracing code. */
#define BezierQuantum 200
/* Slack added when growing the primitive-info array. */
#define PrimitiveExtentPad 2048
/* Hard cap on Bezier coordinates (resource guard); NOTE(review): the
   enforcement site is not visible in this section. */
#define MaxBezierCoordinates 67108864
/*
  Report a malformed drawing primitive and abort the current parse step.
  The expansion site must have a MagickBooleanType `status` in scope and be
  inside a breakable loop or switch.
*/
#define ThrowPointExpectedException(image,token) \
{ \
  (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
/*
  One monotonic run of points of a polygon, as built by
  ConvertPathToPolygon(); points are stored top-to-bottom (the run is
  reversed when traced upward).
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;  /* x1/x2 span all points; y1/y2 are the first/last point's y */

  double
    scanline;  /* 0.0 at init, -1.0 once sealed; presumably tracks rendering
                  progress -- confirm in the polygon renderer */

  PointInfo
    *points;  /* owned by the edge; released by DestroyEdge()/
                 DestroyPolygonInfo() */

  size_t
    number_points;

  ssize_t
    direction;  /* 1 when the original traversal was downward, else 0 */

  MagickBooleanType
    ghostline;  /* edge originated from a GhostlineCode (invisible) subpath */

  size_t
    highwater;  /* initialized to 0; NOTE(review): usage not visible here */
} EdgeInfo;

/*
  Ellipse/arc element parameters; NOTE(review): consumers are outside this
  section of the file.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Mutable state threaded through the MVG path-tracing helpers (see the
  Trace*() forward declarations).
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;  /* growable primitive buffer */

  size_t
    *extent;  /* allocated capacity of *primitive_info */

  ssize_t
    offset;  /* current write position */

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  Sorted edge table produced by ConvertPathToPolygon().
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Path vertex opcodes (see ConvertPrimitiveToPath()): MoveToCode starts a
  closed subpath; OpenCode retroactively marks the start of a subpath found
  to be unclosed; GhostlineCode begins the invisible closing segment added to
  an open subpath (a move-to carrying the ghostline flag); LineToCode extends
  the current subpath; EndCode terminates the array.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/*
  One vertex of a converted vector path.
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(Image *,MVGInfo *,const char *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo structure (aborting on failure) and populate it with
    default values.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(DrawInfo));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  /*
    Start from a default-initialized structure; a NULL draw_info yields the
    defaults unchanged.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  /*
    Deep-copy owned strings.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Deep-copy pattern images; a legacy tile is migrated to fill_pattern.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      &draw_info->fill_pattern->exception);
  else
    if (draw_info->tile != (Image *) NULL)
      clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
        &draw_info->tile->exception);
  clone_info->tile=NewImageList(); /* tile is deprecated */
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,&draw_info->stroke_pattern->exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is terminated by a value below MagickEpsilon; copy
        the x entries plus the terminator.  The 2*x+2 allocation leaves extra
        zeroed slots -- presumably for later pattern doubling; confirm at the
        consumers.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The struct assignment above copied the stops pointer; replace it with
        an owned copy.  NOTE(review): the exception tag reuses the dash
        pattern message -- looks like a copy-paste; confirm before changing.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_opacity=draw_info->fill_opacity;
  clone_info->stroke_opacity=draw_info->stroke_opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,&draw_info->clipping_mask->exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,&draw_info->composite_mask->exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
/*
  Compare two doubles: return -1/1 on strict inequality, otherwise fall
  through to the next comparison (ties -- and NaNs -- continue).
*/
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  /* Order by first point (y, then x)... */
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /* ...then by the cross product of the initial segment directions... */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  /* ...then by the second point. */
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  /*
    Reverse the point array in place by swapping inward from both ends.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  /*
    Dump each path vertex and a human-readable opcode to the drawing log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *annotation;

    switch (p->code)
    {
      case GhostlineCode:
        annotation="moveto ghostline";
        break;
      case OpenCode:
        annotation="moveto open";
        break;
      case MoveToCode:
        annotation="moveto";
        break;
      case LineToCode:
        annotation="lineto";
        break;
      default:
        annotation="?";
        break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,annotation);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* path_info index of the current subpath's first vertex */

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case is 3 vertices per input point (the point itself plus the
    ghostline pair closing an open subpath) plus the EndCode terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and append an
      invisible (ghostline) segment back to the subpath's first point.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim to the n+1 vertices actually used (indices 0..n).  NOTE(review):
    if this resize fails the original block is freed by ResizeQuantumMemory
    and NULL is returned -- callers must handle NULL.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /*
    Release every owned string, image, and array; each helper returns NULL
    so the corresponding member is cleared as it is freed.
  */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature to catch use-after-destroy, then free. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  register size_t
    i;

  /*
    Free the point list of the doomed edge, then shift the remaining edges
    down one slot so the table stays dense.  Returns the new edge count.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  for (i=edge; i < polygon_info->number_edges; i++)
    polygon_info->edges[i]=polygon_info->edges[i+1];
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    edge;

  /*
    Free each edge's point list, then the edge array, then the structure
    itself.  Always returns NULL so callers can reset their pointer.
  */
  for (edge=(ssize_t) polygon_info->number_edges-1; edge >= 0; edge--)
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
/*
  AffineEdge() clips the span [edge->x1,edge->x2] at destination row y so
  that, under the inverse affine transform, every remaining x maps inside
  the source image.  An empty result is signalled by x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the source x-coordinate at destination x == 0 for this row */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* source x grows with destination x: clip where it crosses 0/columns */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* source x decreases with destination x: intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: source x is constant along the row; reject if out of range */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): this degenerate case assigns edge->x2, unlike the
            columns case above which assigns edge->x1 to force an empty span
            (x2 < x1).  Asymmetry looks suspicious -- confirm against the
            caller's x2 < x1 rejection test.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal_determinant;

  /*
    Invert the 2x3 affine transform.  PerceptibleReciprocal() keeps the
    reciprocal finite for a (near-)singular matrix.
  */
  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=reciprocal_determinant*affine->sy;
  inverse_affine.rx=reciprocal_determinant*(-affine->rx);
  inverse_affine.ry=reciprocal_determinant*(-affine->ry);
  inverse_affine.sy=reciprocal_determinant*affine->sx;
  /* translation terms are mapped through the inverted linear part */
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites `source' over `image' as dictated by the
  forward affine transform; each destination scanline is filled by mapping
  destination coordinates back through the inverse transform and
  interpolating the source.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    extent[4],
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* forward-transform the four source corners to find the destination hull */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* clamp the hull to the destination image bounds */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* clip this scanline so the inverse mapping stays inside the source */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* map destination (x,y) back to source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      /* composite interpolated source pixel over the destination pixel */
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    maximum_width;

  /*
    Clamp the requested stroke width to a sane multiple of the image's
    larger dimension.
  */
  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
/*
  DrawBoundingRectangles() strokes the bounding rectangle of every polygon
  edge (red for one direction, green for the other) plus the overall bounds
  in blue.  Debug aid for the rendering algorithm.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* "#0000" = fully transparent fill; only the stroke is drawn */
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the effective stroke width, scaled by resolution and transform */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounds, expanded by mid and clamped to the image */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red for one winding direction, green for the other */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      /* an early break above means a failure occurred; bail out */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* overall bounds in blue */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /*
    Look up the clip path registered under `id', render it into a mask
    image, and install that mask as the image clip mask.
  */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,&image->exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageClipMask(image,clipping_mask);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() renders `clip_path' (MVG) into a fresh image the size
  of `image' and returns it as a clipping mask, or NULL on failure.  The
  caller owns (and must destroy) the returned image.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* detach any existing clip mask so rendering is not self-clipped */
  status=SetImageClipMask(image,(Image *) NULL);
  /* transparent background: unpainted areas end up clipped */
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  /* paint the path area opaque white; stroke is fully transparent */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* convert coverage (alpha) into a grayscale mask image */
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  if (draw_info->compliance != SVGCompliance)
    status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() renders `mask_path' (MVG) into a fresh image the size
  of `image' and returns it as a composite mask, or NULL on failure.  The
  caller owns (and must destroy) the returned image.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(composite_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* detach any existing mask so rendering is not self-masked */
  status=SetImageMask(image,(Image *) NULL);
  /* transparent background: unpainted areas end up masked out */
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  /* paint the path area opaque white; stroke is fully transparent */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  status=RenderMVGContent(composite_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* convert coverage (alpha) into a grayscale mask image */
  status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
  status&=NegateImage(composite_mask,MagickFalse);
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
*/
/*
  DrawDashPolygon() strokes the polyline in `primitive_info' as a sequence
  of dashes, honoring the draw_info dash pattern and dash offset.  Odd
  pattern indices are gaps; even indices are drawn segments.
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* count vertices up to the UndefinedPrimitive sentinel */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* +32 slack: each dash emits up to two extra vertices plus sentinels */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance n/length until the offset is used up.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polyline segment, emitting alternating draw/gap dashes.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted: advance, wrapping at a zero */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* odd index = gap: restart the dash at the current position */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* even index = drawn dash: close it here and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed remainder of this segment into the next one */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* flush a final partial dash, if one is open */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
/*
  DrawGradientImage() paints a linear or radial gradient over the image's
  gradient bounding box, blending adjacent color stops and compositing the
  result over the existing pixels.
*/
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  /* length of the gradient vector, used to normalize linear offsets */
  length=sqrt(point.x*point.x+point.y*point.y);
  /* NOTE(review): bounding_box.width/height are used below as absolute end
     coordinates, not extents -- TODO confirm against the producer of
     gradient->bounding_box. */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    /* NOTE(review): q starts at column 0 but x starts at bounding_box.x;
       if bounding_box.x != 0 the q/indexes+x pairing drifts -- verify. */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* recompute the offset except at the gradient origin pixel */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* find the first stop past the offset */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* blend linearly between the two surrounding stops */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          /* fold the offset back and forth into [0,1] */
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  /* wrap the offset into one period of the gradient vector */
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  /* antialias the seam between repetitions */
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the seam, blend first stop against last stop */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
/*
  CheckPrimitiveExtent() grows the primitive-info array to hold at least
  `pad' more entries (plus slack).  On allocation failure it frees the old
  array, reports a resource-limit error, and leaves a minimal valid array
  in place so callers can unwind safely; returns MagickFalse in that case.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  /* compute in double so the overflow checks below cannot themselves wrap */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* mark the newly added tail entries as unused */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree comparator: order MVG macro names lexicographically.
  */
  return(strcmp((const char *) target,(const char *) source));
}
/*
  GetMVGMacros() scans an MVG primitive stream for named macro definitions
  of the form `push <context> "name" ... pop <context>' and returns a
  splay-tree mapping each macro name to its body text.  Returns NULL when
  primitive is NULL.  The caller owns the returned splay-tree; keys and
  values are released by the tree's relinquish methods.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  /* token buffer is as long as the whole stream, so any token fits */
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              Track push/pop nesting depth; n reaches zero at the matching
              pop for this push.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* back end up to just before the "pop" keyword */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  /* end-start bounds the copy; CopyMagickString terminates */
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
/*
  IsPoint() returns MagickTrue if the string parses as a coordinate value:
  either the numeric parse consumed at least one character, or the parsed
  value is non-zero.  Otherwise it returns MagickFalse.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    number;

  number=StringToDouble(point,&end);
  if ((end == point) && (fabs(number) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TracePoint() records a single coordinate as a one-point primitive that is
  not part of a closed subpath.  Always returns MagickTrue.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
*next_token,
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (status == MagickFalse)
return(MagickFalse);
}
primitive=(char *) NULL;
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=(&image->exception);
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MaxTextExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=QueryColorDatabase("#000000",&start_color,&image->exception);
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MaxTextExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (graphic_context[n]->fill_opacity != OpaqueOpacity)
graphic_context[n]->fill.opacity=ClampToQuantum(
graphic_context[n]->fill_opacity);
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_opacity*=(1.0-opacity);
else
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
if (graphic_context[n]->fill.opacity != TransparentOpacity)
graphic_context[n]->fill.opacity=(Quantum)
graphic_context[n]->fill_opacity;
else
graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,graphic_context[n]->composite_mask);
}
break;
}
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_opacity*=(1.0-opacity);
graphic_context[n]->stroke_opacity*=(1.0-opacity);
}
else
{
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
GradientType
type;
PixelPacket
stop_color;
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&stop_color,&image->exception);
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,&start_color,&stop_color);
start_color=stop_color;
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (graphic_context[n]->stroke_opacity != OpaqueOpacity)
graphic_context[n]->stroke.opacity=ClampToQuantum(
graphic_context[n]->stroke_opacity);
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_opacity*=(1.0-opacity);
else
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
if (graphic_context[n]->stroke.opacity != TransparentOpacity)
graphic_context[n]->stroke.opacity=(Quantum)
graphic_context[n]->stroke_opacity;
else
graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(image,&mvg_info,token);
if (coordinates == 0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryImageException(DrawError,
"NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
  DrawImage() renders the drawing primitives described by
  draw_info->primitive onto image.  It is a thin public entry point that
  starts the MVG renderer at recursion depth zero.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image is returned here; any prior image is destroyed.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    key[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *pattern_info;

  ImageInfo
    *pattern_image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    Look up the pattern's MVG path and geometry, registered earlier as
    image artifacts keyed by the pattern name.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Allocate a fresh pattern canvas of the requested geometry with a fully
    transparent background; destroy any previous pattern image.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  pattern_image_info=AcquireImageInfo();
  pattern_image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(pattern_image_info);
  pattern_image_info=DestroyImageInfo(pattern_image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the pattern's MVG onto the canvas, honoring an optional
    "-type" artifact that selects the gradient type.
  */
  pattern_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_info->fill_pattern=NewImageList();
  pattern_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    pattern_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&pattern_info->primitive,path);
  status=RenderMVGContent(*pattern,pattern_info,0);
  pattern_info=DestroyDrawInfo(pattern_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  DestroyPolygonThreadSet() releases every per-thread PolygonInfo in the set
  allocated by AcquirePolygonThreadSet(), then frees the set itself.
  Always returns NULL, so callers can write
  polygon_info=DestroyPolygonThreadSet(polygon_info).
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  /* one slot per worker thread; skip slots that were never populated */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
  {
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  }
  return((PolygonInfo **) RelinquishMagickMemory(polygon_info));
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetOpacityPixel() computes the antialiased fill coverage (returned value,
  in [0,1]) and stroke coverage (*stroke_opacity, in [0,1]) contributed by
  polygon_info's edge list at pixel (x,y).  `mid` is half the stroke width
  in device space; `fill`/`fill_rule` control whether and how the interior
  is filled.  NOTE(review): mutates p->scanline and p->highwater as a
  per-edge scan-line cache, and calls DestroyEdge() on edges wholly above
  the current y — so callers must scan y in increasing order and each
  thread needs its own PolygonInfo (see AcquirePolygonThreadSet).
*/
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,      /* SQUARED distance from (x,y) to the nearest edge segment */
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges appear to be ordered by bounds.y1 — the early break relies on
       that; TODO confirm against ConvertPathToPolygon */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge is entirely above this scanline; it can never match again */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume from the cached highwater index (at least 1, since segment
       i uses points[i-1] and points[i]) */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first hit on a new scanline: advance the cache */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta = projection of (x,y)-q onto the segment direction (unnormalized) */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* closest to the segment's start point */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* closest to the segment's end point */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* closest to the segment interior: squared perpendicular
                 distance via the cross product */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* ghostlines (implicit closing segments) are never stroked */
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;   /* fully inside the stroke band */
              else
                {
                  /* in the antialiasing fringe: fade by distance beyond
                     the stroke half-width */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          /* beta not already computed above (stroke branch skipped) */
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* edge antialiasing: coverage falls off quadratically within one
         pixel of the boundary */
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* entire edge lies left of (x,y): crossing is certain */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    /* find the segment spanning this scanline */
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* sidedness test: does the segment pass left of (x,y)? */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: inside when the winding number is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* non-zero rule: inside when the winding number is non-zero */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  /* outside the interior proper: return the boundary antialias coverage */
  return(subpath_opacity);
}
/*
  DrawPolygonPrimitive() rasterizes primitive_info (a traced polygon, line,
  or point) onto image, filling and/or stroking per draw_info.  The scanline
  loop is parallelized with OpenMP; each thread uses its own PolygonInfo
  from AcquirePolygonThreadSet() because GetOpacityPixel() mutates per-edge
  cache state.  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  double
    mid;   /* half the stroke width, scaled into device space */

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* a single coordinate cannot form an edge; nothing to draw */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* intentionally disabled debug aid: draws the polygon's bounding
     rectangles when the `if (0)` is flipped on */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0]);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* fill only for the flood-fill style paint methods */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by the stroke half-width plus one pixel of antialias fringe */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render rectangle to the canvas */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* NOTE(review): coordinates==1 was already rejected above, so in practice
     only the number_edges==0 case selects this branch — confirm */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;   /* another thread failed; cannot break out of omp for */
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for ( ; x <= stop_x; x++)
        {
          /* paint only the single pixel the point rounds to */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            (void) GetFillColor(draw_info,x-start_x,y-start_y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();   /* selects this thread's PolygonInfo */

    double
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;   /* another thread failed; cannot break out of omp for */
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,x,y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold instead of smooth coverage */
          fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
        }
      /* convert coverage to this quantum's opacity scale (opacity here is
         transparency: QuantumRange == fully transparent) */
      (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
      fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
        (MagickRealType) q->opacity,q);
      (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
      stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
        stroke_color.opacity));
      MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
        (MagickRealType) q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  ConstrainCoordinate() clamps a primitive coordinate to +/-(SSIZE_MAX-512)
  so that subsequent ceil()/cast-to-ssize_t operations cannot overflow.
*/
static inline double ConstrainCoordinate(double x)
{
  const double
    bound = (double) (SSIZE_MAX-512);

  if (x < -bound)
    return(-bound);
  if (x > bound)
    return(bound);
  return(x);
}
/*
  LogPrimitiveInfo() writes a description of the given primitive to the
  draw event log: simple primitives (point, color, matte, text, image) are
  logged as a single line; everything else is logged vertex-by-vertex,
  flagging duplicate points and whether each subpath ends open or closed.

  Fix: the original assigned point=primitive_info[i].point twice per loop
  iteration (nothing modifies point or i in between); the redundant second
  assignment is removed.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case MattePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Log each vertex; coordinates counts down the points of the current
    subpath, q is the previous point (for duplicate detection) and p the
    first point of the subpath (for open/closed detection).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath complete: closed if the last point returned to the first */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  exception=(&image->exception);
  status=MagickTrue;
  /*
    A non-gray fill or stroke on a grayscale image forces the image into
    sRGB first so color drawing is possible.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace);
  if (draw_info->compliance == SVGCompliance)
    {
      /* install SVG clip/composite masks for the duration of the draw */
      status&=SetImageClipMask(image,draw_info->clipping_mask);
      status&=SetImageMask(image,draw_info->composite_mask);
    }
  /* clamp before casting so extreme coordinates cannot overflow ssize_t */
  x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
  y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case ColorPrimitive:
    {
      /*
        Recolor pixels with the fill color, per the requested paint method.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* recolor the single pixel at (x,y) */
          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* recolor every pixel similar to the pixel at (x,y) */
          MagickBooleanType
            sync;

          PixelPacket
            target;

          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* flood-fill from (x,y); fill-to-border targets the border color */
          MagickPixelPacket
            target;

          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          /* recolor every pixel in the image */
          MagickBooleanType
            sync;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case MattePrimitive:
    {
      /*
        Same paint methods as ColorPrimitive, but only the opacity channel
        of the affected pixels is modified.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            pixel;

          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,&pixel);
          SetPixelOpacity(q,pixel.opacity);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Composite an external image: read it (inline data: URI or file),
        optionally resize it to the requested extent, then composite at
        the gravity-adjusted position.
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          &image->exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* only the first image of a sequence is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,geometry);
        }
      if (composite_image->matte == MagickFalse)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
      if (draw_info->opacity != OpaqueOpacity)
        (void) SetImageOpacity(composite_image,draw_info->opacity);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        &image->exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* over-composite goes through the affine renderer; other compose
         operators use the generic compositor */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        (void) DrawAffineImage(image,composite_image,&affine);
      else
        (void) CompositeImage(image,draw_info->compose,composite_image,
          geometry.x,geometry.y);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /*
        Composite a single fill-colored pixel over the image at (x,y);
        out-of-bounds points are silently ignored.
      */
      PixelPacket
        fill_color;

      PixelPacket
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color);
      MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
        (MagickRealType) q->opacity,q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /*
        Render text via AnnotateImage() with a cloned DrawInfo carrying the
        text and its position geometry.
      */
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        Vector primitive (line, polygon, ellipse, path, ...): choose among
        dashed rendering, cap/join-aware stroking, or the plain polygon
        renderer.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          /* fill first with stroking disabled, then overlay the dashes */
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info);
              break;
            }
          /* fill with stroking disabled, then draw the stroke outline */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /* remove the temporary SVG clip/composite masks */
      status&=SetImageClipMask(image,(Image *) NULL);
      status&=SetImageMask(image,(Image *) NULL);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  DrawRoundLinecap() paints a round cap at the endpoint described by
  primitive_info: it builds a degenerate four-vertex polygon whose points
  all lie within 2*MagickEpsilon of the endpoint and hands it to
  DrawPolygonPrimitive() to render.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  i=0;
  while (i < 4)
    linecap[i++]=(*primitive_info);
  linecap[4].primitive=UndefinedPrimitive;  /* terminate the vertex list */
  linecap[0].coordinates=4;
  /* nudge three of the vertices so the polygon is not a single point */
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.  The stroke is rendered by tracing an outline
    polygon around each subpath and filling that outline with the stroke
    color/pattern; round line caps are painted separately at open ends.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The clone fills with the stroke color (and stroke pattern, if any) and
    has its own stroking disabled, so DrawPolygonPrimitive() only fills.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* iterate over the subpaths of the primitive list */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q is the last point of this subpath; cap both open ends */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets the given affine matrix to the identity
  transform: sx=sy=1.0 and rx=ry=tx=ty=0.0.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then set the unit scale factors */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes: zero the structure, apply built-in
    defaults, then override with settings inherited from image_info and
    its image options.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default colors: fill "#000F", stroke "#FFF0" */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* 12pt default unless a non-zero pointsize was inherited */
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Image options take precedence over the built-in defaults.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept a symbolic weight; fall back to a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the number of ways to choose k of n items, i.e. the
% binomial coefficient C(n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() evaluates the binomial coefficient C(n,k) in floating point:
  the factors (k+1)..n are multiplied in, then the factors 1..(n-k) are
  divided out, which equals n!/(k!*(n-k)!) without forming the large
  intermediate factorials.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result = 1.0;

  ssize_t
    j;

  j=k;
  while (++j <= n)
    result*=(double) j;
  j=0;
  while (++j <= (n-k))
    result/=(double) j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an arc primitive: the ellipse is centered on the
  midpoint of the start/end chord with the half-extents of that chord as
  radii, and the actual tracing is delegated to TraceEllipse().
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=(start.x+end.x)/2.0;
  center.y=(start.y+end.y)/2.0;
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}
/*
  TraceArcPath() converts an SVG-style elliptical arc segment — endpoints,
  radii, x-axis rotation angle, large-arc and sweep flags — into a run of
  cubic Bezier segments appended at the current MVG offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: coincident endpoints reduce to a point, vanishing
    radii to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /* rotate into the ellipse's axis-aligned frame */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* radii too small to span the endpoints; scale them up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* endpoints mapped onto the unit circle of the scaled ellipse */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* pick the circle center on the correct side per large_arc/sweep */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* start angle alpha and signed sweep theta, adjusted to the sweep flag */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per quarter turn (at most) */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  p=primitive_info;
  status=MagickTrue;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the control-point distance for this segment's sub-angle */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* emit the 4 Bezier control points, mapped back to image space */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may grow the primitive array; refresh p via the offset */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /* rewind to the segment start and stamp the aggregate coordinate count */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() expands a Bezier curve defined by number_coordinates
  control points (already stored at the current MVG offset) into a
  polyline of short segments written back at the same offset.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sampling density by the largest pairwise coordinate spread,
    rejecting spreads too large to index safely.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  /* cap the per-coordinate sampling rate at BezierQuantum */
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate the array; refresh the pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points: coefficients[] holds the binomial weights of
    the Bernstein basis, evaluated at control_points values of weight.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* close the polyline exactly at the curve's final control point */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* stamp the primitive type backwards over the generated points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a circle centered at 'start' whose radius is the distance from
    'start' to 'end'.  The circle is expressed as a full 0..360 degree
    ellipse sweep with equal x and y radii.
  */
  double
    dx,
    dy;

  PointInfo
    radii,
    sweep;

  dx=end.x-start.x;
  dy=end.y-start.y;
  radii.x=(double) hypot((double) dx,(double) dy);
  radii.y=radii.x;
  sweep.x=0.0;
  sweep.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,sweep));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  /*
    Trace an elliptical arc (arc.x .. arc.y, in degrees) around 'center'
    with the given radii as a segmented polyline written at
    mvg_info->offset.  Returns MagickFalse on resource failure.
  */
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Degenerate radius: emit nothing, but report success. */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /*
    Choose the angular step: default pi/8, refined for large radii so the
    segment length stays perceptible (delta is ~2/max-radius).
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  /* Normalize so the end angle is not before the start angle. */
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  /* Guard the (size_t) cast below against overflow. */
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate; refresh the base pointer. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Always land exactly on the terminal angle. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* A full sweep ends where it began: mark the subpath closed. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* Stamp the primitive type on every generated vertex (walk backwards). */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace a line segment from 'start' to 'end'.  When the two endpoints
    coincide (within epsilon) the segment collapses to a single point
    primitive instead.
  */
  MagickBooleanType
    coincident;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  coincident=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (coincident != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
  /*
    Parse an SVG-style path string ('M', 'L', 'C', 'Q', 'A', ... commands;
    lowercase variants are relative) and trace it into the primitive-info
    array at mvg_info->offset.  Returns the total number of coordinates
    generated, or 0 on failure.
  */
  char
    *next_token,
    token[MaxTextExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickStatusType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    /* Remember the previous command: 'S'/'T' reflection depends on it. */
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-rotation large-arc-flag sweep-flag x y,
          repeated while more coordinate pairs follow.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          /* Uppercase command = absolute coordinates; lowercase = relative. */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: current point plus three control points.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to: only the x coordinate is parsed.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the previous subpath's bookkeeping, then
          start a new subpath at the parsed point.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* First pair is the subpath start ('Z' returns here). */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: current point plus two control points.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding cubic command: reflection degenerates to 'point'. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* No preceding quadratic command: reflection degenerates. */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to: only the y coordinate is parsed.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath start and mark the
          subpath closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(image,token);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* Finalize the trailing (possibly unclosed) subpath. */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* Stamp the primitive type; multiple 'Z' closes use fill-to-border. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace an axis-aligned rectangle as a closed five-point polygon:
    start -> (start.x,end.y) -> end -> (end.x,start.y) -> start.
  */
  PointInfo
    corners[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(p,corners[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Stamp the primitive type on every generated vertex (walk backwards). */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Trace a rectangle with rounded corners as four quarter-ellipse arcs
    (one per corner) stitched into a single closed subpath.  'arc' holds
    the corner radii; it is clamped to half the rectangle size.
  */
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* Remember where the subpath starts; restored after the corner arcs. */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* Degenerate rectangle: emit nothing, report success. */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Corner radii cannot exceed half the rectangle's extent. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Top-right corner: 270..360 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-right corner: 0..90 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-left corner: 90..180 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Top-left corner: 180..270 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  /* Close the loop: repeat the very first vertex of the subpath. */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Rewind the offset so the four arcs count as one subpath. */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Stamp the primitive type on every generated vertex (walk backwards). */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  /*
    Extend both end vertices of an open stroked path outward by 'offset'
    along the path direction so a square line cap covers the stroke
    width.  The path is modified in place; always returns MagickTrue.
  */
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Locate the first vertex measurably distinct from vertex 0; it defines
    the direction in which the starting cap is extended.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /*
    Multiply by PerceptibleReciprocal(distance) rather than dividing by
    'distance': a fully degenerate path (all vertices coincident) yields
    distance == 0.0 and a raw division would propagate NaN coordinates.
  */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  /*
    Symmetric search from the tail: the last measurably distinct vertex
    defines the direction of the ending cap.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)*PerceptibleReciprocal(distance));
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)*PerceptibleReciprocal(distance));
  return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  /*
    Convert a polyline into a closed polygon outlining its stroke: two
    offset paths (path_p / path_q) are traced on either side of the
    spine, joined per draw_info->linejoin, then concatenated into a
    single stroke polygon.  Returns NULL on allocation failure; the
    caller owns (and must free) the returned polygon.
  */
#define MaxStrokePad  (6*BezierQuantum+360)
  /*
    Grow path_p/path_q to hold 'pad' more points; on failure release all
    working memory and return NULL from the enclosing function.  The
    '~max_strokes < (pad)' test guards the size addition against
    overflow.
  */
#define CheckPathExtent(pad) \
if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
  { \
    if (~max_strokes < (pad)) \
      { \
        path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
      } \
    else \
      { \
        max_strokes+=(pad); \
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes+ \
          MaxStrokePad,sizeof(*path_p)); \
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes+ \
          MaxStrokePad,sizeof(*path_q)); \
      } \
    if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
      { \
        if (path_p != (PointInfo *) NULL) \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        if (path_q != (PointInfo *) NULL) \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        polygon_primitive=(PrimitiveInfo *) \
          RelinquishMagickMemory(polygon_primitive); \
        return((PrimitiveInfo *) NULL); \
      } \
  }

  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /*
    For round/miter joins on a closed path, duplicate the second vertex
    so the final join is computed like any interior join.
  */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical / near-horizontal segments: clamp the slope or inverse
    slope to +/- 1/epsilon instead of dividing by ~0.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid = half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* Perpendicular offset of the first segment's stroke edges. */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* Skip segments shorter than half a pixel. */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /*
      box_[pq][4] is the intersection of the two offset edges (the miter
      point); for parallel segments fall back to the shared endpoint.
    */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* Sign of the cross product decides which side is inner vs outer. */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* Approximate the round join with an arc of short segments. */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          /* Approximate the round join with an arc of short segments. */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* Advance: segment q becomes the new segment p. */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q in reverse, with
    extra closure vertices for closed paths.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_unop__identity_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fp32)
// op(A') function: GB (_unop_tran__identity_uint32_fp32)
// C type: uint32_t
// A type: float
// cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (uint32_t) Ax: apply the IDENTITY unary op with a float -> uint32_t
// typecast to every stored entry of A.  Auto-generated kernel: do not edit
// by hand; change the Generator/ sources instead.
// The cast goes through double (GB_cast_to_uint32_t ((double) aij)), matching
// the cast rule declared at the top of this file.
GrB_Info GB (_unop_apply__identity_uint32_fp32)
(
uint32_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/hyper/full case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint32_t) A': typecast while transposing A.  The actual work is done
// by the shared GB_unop_transpose.c template, driven by the GB_* macros
// defined at the top of this generated file.
GrB_Info GB (_unop_tran__identity_uint32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB080-func-arg-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A function argument is passed by reference:
its data-sharing attribute is the same as its actual argument's.
i is shared. *q is shared.
Data race pair: *q(i)@59:4 vs. *q(i)@59:4
*/
#include "omprace.h"
#include <omp.h>
#include<stdio.h>
/* Increment the int that q points at (argument passed by reference).
   In this benchmark the callers invoke f1 concurrently on a shared
   target without synchronization -- that data race is intentional. */
void f1(int* q)
{
    (*q)++;
}
/* Driver for the DRB080 benchmark: every thread in the parallel region
   calls f1(&i) on the same shared i.  The unsynchronized "*q += 1"
   inside f1 is the data race this benchmark is designed to exhibit
   (see the header comment above), so the printed value of i is
   nondeterministic.  Do not "fix" the race -- it is the test case. */
int main()
{
omprace_init();
int i=0;
/* i is shared across all threads; each thread races on the increment */
#pragma omp parallel
{
f1(&i);
}
printf ("i=%d\n",i);
omprace_fini();
return 0;
}
|
pr7.c | // Write an OpenMP program to show how the firstprivate clause works (factorial program).
#include <stdio.h>
#include <malloc.h>
#include <omp.h>
#include <stdlib.h>
/* Compute n! iteratively.  Returns 1 for n <= 1; the result is exact
   only while it fits in a long long (i.e. n <= 20). */
long long factorial(long n)
{
    long long result = 1;
    for (long long k = 2; k <= n; k++)
        result *= k;
    return result;
}
/*
 * Demonstrates the OpenMP firstprivate clause: the pointer x and the
 * counter j are copied into each thread, so each thread accumulates its
 * own private j while all threads write through the same array.  The
 * printed x[] therefore depends on how iterations are split across
 * threads -- which is exactly what the demo is meant to show.
 *
 * Optional argv[1] sets the number of threads.
 */
int main(int argc, char **argv)
{
    int i, j, threads;
    long long *x;
    long long n = 12;
    /* Set number of threads equal to argv[1] if present */
    if (argc > 1)
    {
        threads = atoi(argv[1]);
        if (omp_get_dynamic())
        {
            omp_set_dynamic(0);
            printf("called omp_set_dynamic(0)\n");
        }
        omp_set_num_threads(threads);
    }
    printf("%d threads\n", omp_get_max_threads());
    /* BUG FIX: allocate sizeof *x (long long), not sizeof(long).
       They differ on LLP64 platforms (e.g. 64-bit Windows), where the
       old code allocated half the needed space -- a heap overflow. */
    x = malloc(n * sizeof *x);
    if (x == NULL)
    {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    for (i = 0; i < n; i++) x[i] = factorial(i);
    j = 0;
    /* Is the output the same if the following line is commented out? */
    #pragma omp parallel for firstprivate(x,j)
    for (i = 1; i < n; i++)
    {
        j += i;
        x[i] = j * x[i - 1];
    }
    for (i = 0; i < n; i++)
        printf("factorial(%2d)=%14lld x[%2d]=%14lld\n", i, factorial(i), i, x[i]);
    free(x);
    return 0;
}
|
GB_binop__rminus_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp64)
// A*D function (colscale): GB (_AxD__rminus_fp64)
// D*A function (rowscale): GB (_DxB__rminus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp64)
// C=scalar+B GB (_bind1st__rminus_fp64)
// C=scalar+B' GB (_bind1st_tran__rminus_fp64)
// C=A+scalar GB (_bind2nd__rminus_fp64)
// C=A'+scalar GB (_bind2nd_tran__rminus_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B for RMINUS (z = y - x) where C, A, and B are all dense.
// Auto-generated wrapper: all work happens in the shared template.
// NOTE(review): unlike the methods below there is no GB_DISABLE guard here;
// presumably the caller checks it -- confirm against the Generator sources.
void GB (_Cdense_ewise3_accum__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B for RMINUS (z = y - x) where C, A, and B are all dense, no accum.
// Auto-generated wrapper around the shared dense ewise3 template.
void GB (_Cdense_ewise3_noaccum__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is pre-sliced into tasks
// (B_ek_slicing / B_ntasks / B_nthreads).  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased through
// p_bwork, dereferenced as double).  Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
// Cleanup: the original had a second, unreachable "return (GrB_SUCCESS)"
// after an early return inside the block; the inner return is removed so
// the control flow matches the sibling _Cdense_accumB wrapper above.
GrB_Info GB (_Cdense_accumb__rminus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the
// RMINUS "multiplier" (z = y - x).  C->x is written directly via Cx.
GrB_Info GB (_AxD__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the
// RMINUS "multiplier" (z = y - x).  C->x is written directly via Cx.
GrB_Info GB (_DxB__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked) with op RMINUS.  When is_eWiseUnion
// is true the alpha/beta scalars (type-erased GB_void*) supply values for
// entries present in only one of A or B.  Workspace declared here is
// released by GB_FREE_WORKSPACE before returning.
GrB_Info GB (_AaddB__rminus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
// only eWiseUnion reads these; plain eWiseAdd leaves them unset
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper,
// using pre-built tasks (TaskList) and the C_to_* lookup arrays.
GrB_Info GB (_AemultB_08__rminus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  For RMINUS, GB_BINOP_FLIP is 0 (flips were normalized by
// the caller, e.g. minus(y,x) -> rminus(x,y)), so only the unflipped
// template branch is compiled.
GrB_Info GB (_AemultB_02__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; M is pre-sliced into tasks (M_ek_slicing).
GrB_Info GB (_AemultB_04__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked, mask may be complemented) where the
// result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__rminus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = rminus (x, Bx): bind the scalar as the first operand.  Since
// RMINUS is z = f(x,y) = y - x, each entry becomes Cx[p] = Bx[p] - x.
// Bb is B->b when B is bitmap (GBB skips absent entries); NULL otherwise.
GrB_Info GB (_bind1st__rminus_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = rminus (Ax, y): bind the scalar as the second operand, so each
// entry becomes Cx[p] = y - Ax[p].  Ab is A->b when A is bitmap (GBB
// skips absent entries); NULL otherwise.
GrB_Info GB (_bind2nd__rminus_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: with x bound first,
// rminus gives cij = op (x, aij) = aij - x.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = rminus (x, A'): transpose A while applying the op with the scalar
// bound as the first operand.
GrB_Info GB (_bind1st_tran__rminus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: with y bound second,
// rminus gives cij = op (aij, y) = y - aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = rminus (A', y): transpose A while applying the op with the scalar
// bound as the second operand.
GrB_Info GB (_bind2nd_tran__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolutiondepthwise_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, on 8-float-packed data using AVX.
// One group (= one channel) per iteration of the outer parallel loop.
// Each output pixel is the fused multiply-add of a 3x3 window of 8-wide
// input vectors (rows r0/r1/r2) with nine 8-wide kernel vectors.  The
// inner loop over output columns is unrolled by 8, 4, 2, then 1 pixels;
// partial sums are stored as soon as they are complete so loads for the
// next pixels can overlap.  Assumes the caller padded bottom_blob so each
// output row reads outw+2 input pixels (standard ncnn convention).
static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias broadcast into all 8 lanes (zero when no bias given)
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows forming the 3x3 window
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// the nine 8-wide kernel vectors, kept in registers for the whole group
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k10 = _mm256_loadu_ps(k0 + 24);
__m256 _k11 = _mm256_loadu_ps(k0 + 32);
__m256 _k12 = _mm256_loadu_ps(k0 + 40);
__m256 _k20 = _mm256_loadu_ps(k0 + 48);
__m256 _k21 = _mm256_loadu_ps(k0 + 56);
__m256 _k22 = _mm256_loadu_ps(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// 8 output pixels per iteration; consecutive pixels reuse the
// previously loaded input vectors shifted by one position
for (; j + 7 < outw; j += 8)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);
__m256 _sum4 = _bias0;
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 24, _sum3);
_sum4 = _mm256_comp_fmadd_ps(_k00, _r04, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r05, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k02, _r06, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k10, _r14, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k11, _r15, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k12, _r16, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k20, _r24, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k21, _r25, _sum4);
_sum4 = _mm256_comp_fmadd_ps(_k22, _r26, _sum4);
__m256 _sum5 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
_mm256_storeu_ps(outptr0 + 32, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k00, _r05, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r06, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k02, _r07, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k10, _r15, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k11, _r16, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k12, _r17, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k20, _r25, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k21, _r26, _sum5);
_sum5 = _mm256_comp_fmadd_ps(_k22, _r27, _sum5);
__m256 _sum6 = _bias0;
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 40, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k00, _r06, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r07, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k02, _r08, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k10, _r16, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k11, _r17, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k12, _r18, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k20, _r26, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k21, _r27, _sum6);
_sum6 = _mm256_comp_fmadd_ps(_k22, _r28, _sum6);
__m256 _sum7 = _bias0;
__m256 _r09 = _mm256_loadu_ps(r0 + 72);
__m256 _r19 = _mm256_loadu_ps(r1 + 72);
__m256 _r29 = _mm256_loadu_ps(r2 + 72);
_mm256_storeu_ps(outptr0 + 48, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k00, _r07, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r08, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k02, _r09, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k10, _r17, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k11, _r18, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k12, _r19, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k20, _r27, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k21, _r28, _sum7);
_sum7 = _mm256_comp_fmadd_ps(_k22, _r29, _sum7);
_mm256_storeu_ps(outptr0 + 56, _sum7);
r0 += 64;
r1 += 64;
r2 += 64;
outptr0 += 64;
}
// 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
r0 += 32;
r1 += 32;
r2 += 32;
outptr0 += 32;
}
// 2 output pixels per iteration
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// remaining single output pixels
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
// skip the 2 extra input pixels per row (kernel width 3, stride 1),
// 8 floats per pixel, to reach the start of the next input row
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}
// Depthwise 3x3 convolution, stride 2, on 8-wide packed float data using AVX.
// Each input channel (group) is convolved independently with its own 3x3
// kernel (nine __m256 taps) plus an optional per-group 8-lane bias.
// Assumes bottom_blob/top_blob use elempack 8 and that top_blob is pre-sized
// to outw x outh (no bounds checking is done here).
static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// After finishing one output row, r0/r1/r2 must skip the unconsumed
// remainder of the current input row, (w - 2*outw), plus one whole extra
// input row, + w, because of the vertical stride of 2; x8 for elempack.
const int tailstep = (w - 2 * outw + w) * 8;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector, or zeros when no bias was supplied
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed each output row
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// hoist the nine 8-lane kernel taps out of the loops (loaded once per group)
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k10 = _mm256_loadu_ps(k0 + 24);
__m256 _k11 = _mm256_loadu_ps(k0 + 32);
__m256 _k12 = _mm256_loadu_ps(k0 + 40);
__m256 _k20 = _mm256_loadu_ps(k0 + 48);
__m256 _k21 = _mm256_loadu_ps(k0 + 56);
__m256 _k22 = _mm256_loadu_ps(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 4 outputs per iteration; with stride 2 the input window
// advances by 2 packed vectors (16 floats) per output, so consecutive
// sums reuse every other loaded vector (_sum1 starts at _r02, etc.).
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
// loads for the next output are issued before the previous store
// (manual software pipelining); do not reorder these statements
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
__m256 _sum2 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k00, _r04, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r05, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k02, _r06, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k10, _r14, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k11, _r15, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k12, _r16, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k20, _r24, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k21, _r25, _sum2);
_sum2 = _mm256_comp_fmadd_ps(_k22, _r26, _sum2);
__m256 _sum3 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k00, _r06, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r07, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k02, _r08, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k10, _r16, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k11, _r17, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k12, _r18, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k20, _r26, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k21, _r27, _sum3);
_sum3 = _mm256_comp_fmadd_ps(_k22, _r28, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
// 4 outputs consumed 2*4 input vectors per row (stride 2)
r0 += 2 * 32;
r1 += 2 * 32;
r2 += 2 * 32;
outptr0 += 32;
}
// 2 outputs per iteration
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 2 * 16;
r1 += 2 * 16;
r2 += 2 * 16;
outptr0 += 16;
}
// scalar (one packed output) remainder
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
outptr0 += 8;
}
// advance to the next pair of input rows (vertical stride 2)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
irbuilder_nested_parallel_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK-DEBUG %s
// expected-no-diagnostics
// TODO: Teach the update script to check new functions too.
#ifndef HEADER
#define HEADER
// CHECK-LABEL: @_Z14parallel_for_0v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*))
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK: omp.par.outlined.exit:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_0v(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG12:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)), !dbg [[DBG13:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG17:![0-9]+]]
//
// Simplest case checked above: a worksharing loop nested directly inside a
// parallel region (no captured variables, empty body).
// NOTE(review): the CHECK lines for this file are autogenerated by
// update_cc_test_checks.py; do not change the code without regenerating them.
void parallel_for_0(void) {
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i) {
}
}
}
// CHECK-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK: omp.par.outlined.exit16:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META71:![0-9]+]], metadata !DIExpression()), !dbg [[DBG72:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META73:![0-9]+]], metadata !DIExpression()), !dbg [[DBG74:![0-9]+]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META75:![0-9]+]], metadata !DIExpression()), !dbg [[DBG76:![0-9]+]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]]), !dbg [[DBG77:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB6]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), !dbg [[DBG78:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit16:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG80:![0-9]+]]
//
// One level of parallel-region nesting: the inner parallel contains the
// worksharing loop; r, a and b are captured and forwarded to the outlined
// function (see the three trailing arguments of __kmpc_fork_call above).
// NOTE(review): autogenerated CHECK lines depend on this exact code.
void parallel_for_1(float *r, int a, double b) {
#pragma omp parallel
{
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i) {
*r = a + b;
}
}
}
}
// CHECK-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK: omp.par.outlined.exit184:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: store i32 0, i32* [[I185]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0
// CHECK-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]])
// CHECK-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]]
// CHECK: omp_loop.preheader190:
// CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1
// CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1)
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]]
// CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191:%.*]]
// CHECK: omp_loop.header191:
// CHECK-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ]
// CHECK-NEXT: br label [[OMP_LOOP_COND192:%.*]]
// CHECK: omp_loop.cond192:
// CHECK-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]]
// CHECK-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]]
// CHECK: omp_loop.body193:
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8
// CHECK-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]]
// CHECK-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float
// CHECK-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8
// CHECK-NEXT: store float [[CONV202]], float* [[TMP11]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_INC194]]
// CHECK: omp_loop.inc194:
// CHECK-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191]]
// CHECK: omp_loop.exit195:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]])
// CHECK-NEXT: br label [[OMP_LOOP_AFTER196:%.*]]
// CHECK: omp_loop.after196:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-DEBUG-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META134:![0-9]+]], metadata !DIExpression()), !dbg [[DBG135:![0-9]+]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META136:![0-9]+]], metadata !DIExpression()), !dbg [[DBG137:![0-9]+]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB13:[0-9]+]]), !dbg [[DBG138:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB13]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, double*, float**)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), i32* [[A_ADDR]], double* [[B_ADDR]], float** [[R_ADDR]]), !dbg [[DBG139:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit184:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[I185]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 0, i32* [[I185]], align 4, !dbg [[DBG146]]
// CHECK-DEBUG-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0, !dbg [[DBG147:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4, !dbg [[DBG148:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]]), !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.preheader190:
// CHECK-DEBUG-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42:[0-9]+]]), !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1), !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]], !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191:%.*]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.header191:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ], !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_COND192:%.*]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.cond192:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]], !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.body193:
// CHECK-DEBUG-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]], !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]]), !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !dbg [[DBG149:![0-9]+]]
// CHECK-DEBUG-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double, !dbg [[DBG149]]
// CHECK-DEBUG-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8, !dbg [[DBG150:![0-9]+]]
// CHECK-DEBUG-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]], !dbg [[DBG151:![0-9]+]]
// CHECK-DEBUG-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float, !dbg [[DBG149]]
// CHECK-DEBUG-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8, !dbg [[DBG152:![0-9]+]]
// CHECK-DEBUG-NEXT: store float [[CONV202]], float* [[TMP11]], align 4, !dbg [[DBG153:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_INC194]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.inc194:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.exit195:
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]]), !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42]]), !dbg [[DBG150]]
// CHECK-DEBUG-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB43:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]]), !dbg [[DBG150]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_AFTER196:%.*]], !dbg [[DBG147]]
// CHECK-DEBUG: omp_loop.after196:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG154:![0-9]+]]
//
// Stress case for the OpenMPIRBuilder: worksharing loops interleaved with
// parallel regions at three nesting depths, plus a final loop after all
// parallel regions (the one the CHECK block above verifies, with its
// renumbered blocks, e.g. omp_loop.preheader190).
// NOTE(review): autogenerated CHECK lines depend on this exact code.
void parallel_for_2(float *r, int a, double b) {
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#endif
|
buggy_version.c | #include<stdio.h>
int main(){
    int T[10];
    // initialize T so that T[i] == i
    for (int i = 0; i < 10; i++) {
        T[i] = i;
    }
    // Each of the 10 outer iterations increments every element of T once,
    // so the expected final value is T[j] == j + 10.
    //
    // BUG FIX: the outer loop is parallelized, so multiple threads perform
    // the read-modify-write T[j] += 1 on the same shared element
    // concurrently — a data race (undefined behavior, lost updates).
    // The update must be atomic.
    #pragma omp parallel for shared(T)
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            #pragma omp atomic
            T[j] += 1;
        }
    }
    // verify the result so the race (if reintroduced) becomes observable
    for (int j = 0; j < 10; j++) {
        if (T[j] != j + 10) {
            printf("wrong result: T[%d] = %d (expected %d)\n", j, T[j], j + 10);
            return 1;
        }
    }
    return 0;
}
|
set_vert_pres_BCs.h | #pragma omp target teams distribute parallel for thread_limit(BLOCK_SIZE)
// Apply vertical (left/right) pressure boundary conditions on the
// red/black checkerboard grids: mirror the first interior column into the
// ghost column 0 and the last interior column into ghost column NUM+1.
// Note the color swap: a ghost cell of one color faces an interior cell of
// the other color.
for (int row = 1; row < NUM/2+1; row++)
{
    // (removed: unused local `NUM_2 = NUM >> 1` recomputed every iteration)
    // p_0,j = p_1,j
    pres_black(0, row) = pres_red(1, row);
    pres_red(0, row) = pres_black(1, row);
    // p_imax+1,j = p_imax,j
    pres_black(NUM + 1, row) = pres_red(NUM, row);
    pres_red(NUM + 1, row) = pres_black(NUM, row);
}
|
GB_binop__lxor_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int16)
// A*D function (colscale): GB (_AxD__lxor_int16)
// D*A function (rowscale): GB (_DxB__lxor_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int16)
// C=scalar+B GB (_bind1st__lxor_int16)
// C=scalar+B' GB (_bind1st_tran__lxor_int16)
// C=A+scalar GB (_bind2nd__lxor_int16)
// C=A'+scalar GB (_bind2nd_tran__lxor_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B for LXOR/int16 where C, A and B are all dense; no accumulator.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// letting the caller fall back to the generic kernel.
// Auto-generated from Generator/*; the body is the shared template
// specialized by the GB_* macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C using the
// LXOR/int16 operator, with B pre-sliced into B_ntasks tasks.
// Auto-generated; body is the shared subassign-23 template.
GrB_Info GB (_Cdense_accumB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C using the LXOR/int16 operator.
GrB_Info GB (_Cdense_accumb__lxor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above always returns.
// Harmless artifact of the code generator; this file is auto-generated,
// so fix it in Generator/* rather than here.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (LXOR/int16),
// with A pre-sliced into A_ntasks tasks. C->x is written directly.
GrB_Info GB (_AxD__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (LXOR/int16).
// Mirror of _AxD above, but row-wise and without task slicing.
GrB_Info GB (_DxB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B for LXOR/int16, over the union
// of the patterns of A and B, with optional (possibly complemented) mask M.
// The C_to_* maps translate C's vectors to those of M/A/B; TaskList carries
// the pre-computed parallel schedule.
GrB_Info GB (_AaddB__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A and B; allocated inside the template and
// released by GB_FREE_WORK before returning
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult over the intersection of the patterns of A and B, combining
// matching entries with the LXOR operator.  Method 08: C is sparse or
// hypersparse; everything is done inside the included meta file.
GrB_Info GB (_AemultB_08__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Method 02 of eWiseMult.  GB_FLIPPED is a compile-time switch read by the
// included template to pick the operand order; GB_BINOP_FLIP is defined at
// the top of this file per operator.
GrB_Info GB (_AemultB_02__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04 of eWiseMult: the mask M drives the iteration; A and B are
// accessed by position.  All work is in the included template.
GrB_Info GB (_AemultB_04__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of eWiseMult with the LXOR operator; the included template
// handles all mask cases.
GrB_Info GB (_AemultB_bitmap__lxor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [k] = lxor (x, Bx [k]) for every entry present in the bitmap Bb.
GrB_Info GB (_bind1st__lxor_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped output, scalar, and input array
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    // the bound scalar's truth value is loop-invariant
    bool xbool = (x != 0) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, k)) continue ;
        int16_t bval = GBX (Bx, k, false) ;
        // cij = lxor (x, bij)
        Cx [k] = (xbool != (bval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = lxor (Ax [k], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__lxor_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped output, input array, and scalar
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    // the bound scalar's truth value is loop-invariant
    bool ybool = (y != 0) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, k)) continue ;
        int16_t aval = GBX (Ax, k, false) ;
        // cij = lxor (aij, y)
        Cx [k] = ((aval != 0) != ybool) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// GB_CAST_OP is redefined here so the shared transpose template applies
// lxor (x, aij) with the bound scalar as the first operand.  Comments cannot
// be placed inside the backslash-continued macro body.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated code; same definition as above)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// GB_CAST_OP is redefined here so the shared transpose template applies
// lxor (aij, y) with the bound scalar as the second operand.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bget_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int8)
// C=scalar+B GB (_bind1st__bget_int8)
// C=scalar+B' GB (_bind1st_tran__bget_int8)
// C=A+scalar GB (_bind2nd__bget_int8)
// C=A'+scalar GB (_bind2nd_tran__bget_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): the generator emitted no dense ewise3-accum kernel for
// the BGET operator (the placeholder name is "(none)").
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense eWise kernel with no accumulator; note it has no GB_DISABLE guard
// and no return value, unlike the other kernels in this file.
void GB (_Cdense_ewise3_noaccum__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the sparse matrix B into the dense matrix C using the BGET
// operator; the loop is in the included subassign template.
GrB_Info GB (_Cdense_accumB__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulates the scalar b into the dense matrix C using the BGET operator.
// The numeric loop is in the included template, which relies on the GB_*
// macros defined at the top of this file.
GrB_Info GB (_Cdense_accumb__bget_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; use the generic case
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: the generated duplicate "return (GrB_SUCCESS)" that followed the
    // block above was unreachable and has been removed
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no colscale kernel was generated for BGET
// (the placeholder name is "(none)").
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale kernel was generated for BGET either.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd over the union of the patterns of A and B with the BGET operator.
// Also implements eWiseUnion: when is_eWiseUnion is true, the alpha/beta
// scalars are read from the untyped inputs below.
GrB_Info GB (_AaddB__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// eWiseUnion scalars: only initialized (and used) when is_eWiseUnion is true
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Method 08 of eWiseMult (C sparse or hypersparse) with the BGET operator;
// all work is done inside the included meta file.
GrB_Info GB (_AemultB_08__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Method 02 of eWiseMult.  BGET is not commutative, so GB_BINOP_FLIP is 1 for
// this file and the flipxy branch below is active; GB_FLIPPED is a
// compile-time switch read by the included template.
GrB_Info GB (_AemultB_02__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04 of eWiseMult: the mask M drives the iteration; all work is in
// the included template.
GrB_Info GB (_AemultB_04__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of eWiseMult with the BGET operator; the included template
// handles all mask cases.
GrB_Info GB (_AemultB_bitmap__bget_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [k] = GB_BITGET (x, Bx [k], int8_t, 8) for every entry present
// in the bitmap Bb; the scalar x is bound as the first operand.
GrB_Info GB (_bind1st__bget_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // typed views of the untyped output, scalar, and input array
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, k)) continue ;
        int8_t bval = GBX (Bx, k, false) ;
        // cij = GB_BITGET (x, bij)
        Cx [k] = GB_BITGET (x, bval, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = GB_BITGET (Ax [k], y, int8_t, 8) for every entry present
// in the bitmap Ab; the scalar y is bound as the second operand.
GrB_Info GB (_bind2nd__bget_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped output, input array, and scalar
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, k)) continue ;
        int8_t aval = GBX (Ax, k, false) ;
        // cij = GB_BITGET (aij, y)
        Cx [k] = GB_BITGET (aval, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// GB_CAST_OP is redefined here so the shared transpose template applies
// GB_BITGET (x, aij) with the bound scalar as the first operand.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ; \
}
GrB_Info GB (_bind1st_tran__bget_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated code; same definition as above)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// GB_CAST_OP is redefined here so the shared transpose template applies
// GB_BITGET (aij, y) with the bound scalar as the second operand.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ; \
}
GrB_Info GB (_bind2nd_tran__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
EulerSolver.h | #include "../DifferentialSolver.h"
// Explicit (forward) Euler integrator for a DifferentialSystem:
//   next = curr + dt * f(curr), with dt scaled by 2^hierarchyLevel for
// hierarchical systems.  Single-phase, fixed-step, no error control.
template<typename Scalar>
class EulerSolver : public DifferentialSolver<Scalar>
{
public:
  using DifferentialSolver<Scalar>::timeStep;
  using DifferentialSolver<Scalar>::currTime;

  // Members are initialized to nullptr below, so destruction is safe even if
  // SetSystem() was never called (the original left them uninitialized: UB).
  EulerSolver(): DifferentialSolver<Scalar>() {}

  // Binds the system and (re)allocates the work buffers sized to the
  // system's maximum dimension count.  Safe to call more than once: any
  // previously allocated buffers are released first (the original leaked
  // them on a second call).
  void SetSystem(DifferentialSystem<Scalar>* system) override
  {
    FreeBuffers();
    this->system = system;
    const int maxDims = system->GetMaxDimentionsCount();
    currCoords = new Scalar[maxDims];
    // oldCoords is only needed for hierarchical systems
    oldCoords = (system->GetHierarchyLevelsCount() > 1) ? new Scalar[maxDims] : nullptr;
    nextCoords = new Scalar[maxDims];
    derivatives = new Scalar[maxDims];
    currTime = 0;
  }

  ~EulerSolver()
  {
    // delete[] on nullptr is a no-op, so no system/level checks are needed
    // (the original dereferenced `system` here, which was UB when SetSystem
    // had never been called)
    FreeBuffers();
  }

  // Explicit Euler needs a single derivative evaluation per step.
  int GetPhasesCount() const override
  {
    return 1;
  }

  void InitStep(Scalar timeStep, Scalar tolerance, bool updateInitialCoords) override
  {
    DifferentialSolver<Scalar>::InitStep(timeStep, tolerance, updateInitialCoords);
    // non-hierarchical systems snapshot their coordinates once per step here
    if (system->GetHierarchyLevelsCount() == 1)
    {
      system->GetCurrCoords(currTime, currCoords);
    }
  }

  void InitStep(const SolverState& solverState) override
  {
    // hierarchical systems snapshot per solver state, including oldCoords
    if (system->GetHierarchyLevelsCount() > 1)
    {
      system->GetCurrCoords(currTime, currCoords, oldCoords, solverState);
    }
  }

  // next = curr + dt * f(curr); dt is scaled by 2^hierarchyLevel.
  bool AdvancePhase(const SolverState& solverState) override
  {
    Scalar currStep = timeStep * (1 << solverState.hierarchyLevel);
    system->GetCurrDerivatives(derivatives, solverState);
    // hoisted out of the loop condition: loop-invariant, and gives OpenMP a
    // canonical loop bound
    const int dimsCount = system->GetDimentionsCount(solverState);
    #pragma omp parallel for
    for (int coordIndex = 0; coordIndex < dimsCount; coordIndex++)
    {
      nextCoords[coordIndex] = currCoords[coordIndex] + derivatives[coordIndex] * currStep;
    }
    return true;
  }

  void AdvanceStep(const SolverState& solverState) override
  {
    if (solverState.IsPreInitial())
    {
      currTime += timeStep;
    }
    if (system->GetHierarchyLevelsCount() > 1)
    {
      system->SetCurrCoords(currTime, nextCoords, oldCoords, solverState);
    }
    else
    {
      system->SetCurrCoords(currTime, nextCoords);
    }
  }

  void RevertStep(Scalar) override
  {
    // never will be called: explicit Euler never rejects a step
  }

  // Fixed error estimate: this method provides no embedded error control.
  Scalar GetLastStepError() const override
  {
    return Scalar(1.0);
  }

  // Fixed step size: always proposes the current timeStep.
  Scalar GetTimeStepPrediction() const override
  {
    return timeStep;
  }

private:
  // Releases all work buffers and resets them; delete[] nullptr is a no-op.
  void FreeBuffers()
  {
    delete [] oldCoords;   oldCoords = nullptr;
    delete [] currCoords;  currCoords = nullptr;
    delete [] nextCoords;  nextCoords = nullptr;
    delete [] derivatives; derivatives = nullptr;
  }

  Scalar* currCoords = nullptr;   // state at the start of the step
  Scalar* oldCoords = nullptr;    // extra state for hierarchical systems only
  Scalar* nextCoords = nullptr;   // state produced by AdvancePhase
  Scalar* derivatives = nullptr;  // f(curr) evaluated by the system
  DifferentialSystem<Scalar> *system = nullptr;
};
|
generator_gemm_common.c | /******************************************************************************
** Copyright (c) 2015-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int i_use_masking_a_c ) {
memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
if ( strcmp( i_arch, "wsm" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
}
} else if ( strcmp( i_arch, "snb" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'y';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
}
} else if ( strcmp( i_arch, "hsw" ) == 0 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'y';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else if ( (strcmp( i_arch, "knc" ) == 0) ||
(strcmp( i_arch, "knl" ) == 0) ||
(strcmp( i_arch, "knm" ) == 0) ||
(strcmp( i_arch, "skx" ) == 0) ||
(strcmp( i_arch, "icl" ) == 0) ) {
if ((strcmp( i_arch, "knc" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_IMCI;
} else if ((strcmp( i_arch, "knl" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
} else if ((strcmp( i_arch, "knm" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
} else if ((strcmp( i_arch, "skx" ) == 0)) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
} else {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_ICL;
}
io_micro_kernel_config->vector_reg_count = 32;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'z';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
} else {
/* shouldn't happen as we caught this case earlier */
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const char* i_arch,
                                                                 const unsigned int i_use_masking_a_c ) {
  /* Fills the micro-kernel configuration for half-vector (xmm) code paths.
   * "wsm" is redirected to the scalar config, IMCI/AVX512 targets are
   * redirected to the full-vector config, and unknown targets receive a
   * disabled (generic) configuration. The common scalar ALU instructions are
   * set unconditionally at the end. */
  const int l_is_f64  = ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) );
  const int l_align_a = ( 0 != (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) );
  const int l_align_c = ( 0 != (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) );

  if ( strcmp( i_arch, "wsm" ) == 0 ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
  } else if ( strcmp( i_arch, "snb" ) == 0 ) {
    /* AVX (no FMA): multiply and add are separate instructions */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( l_is_f64 ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = l_align_a ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = l_align_c ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = l_align_a ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = l_align_c ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    }
  } else if ( strcmp( i_arch, "hsw" ) == 0 ) {
    /* AVX2: fused multiply-add, so no separate add instruction is needed */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( l_is_f64 ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = l_align_a ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = l_align_c ? LIBXSMM_X86_INSTR_VMOVAPD : LIBXSMM_X86_INSTR_VMOVUPD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = l_align_a ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = l_align_c ? LIBXSMM_X86_INSTR_VMOVAPS : LIBXSMM_X86_INSTR_VMOVUPS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else if ( (strcmp( i_arch, "knc" ) == 0) ||
              (strcmp( i_arch, "knl" ) == 0) ||
              (strcmp( i_arch, "knm" ) == 0) ||
              (strcmp( i_arch, "skx" ) == 0) ||
              (strcmp( i_arch, "icl" ) == 0) ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, IMCI/AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
  } else {
    /* unknown target: emit a fully disabled configuration */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  }

  /* scalar ALU instructions shared by all targets */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
/* Fills the micro-kernel configuration for scalar (one element per register)
 * code paths: vector_length is always 1 and scalar SSE/AVX move, multiply and
 * add instructions are selected per architecture and per precision (F64 vs.
 * everything else). Unknown targets receive a disabled (generic)
 * configuration; the common scalar ALU instructions are set at the end. */
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                             const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                             const char* i_arch,
                                                             const unsigned int i_use_masking_a_c ) {
  if ( strcmp( i_arch, "wsm" ) == 0 ) {
    /* SSE3: non-VEX scalar moves and arithmetic */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
    }
  } else if ( strcmp( i_arch, "snb" ) == 0 ) {
    /* AVX (no FMA): VEX-encoded scalar moves, separate multiply and add */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
    }
  } else if ( strcmp( i_arch, "hsw" ) == 0 ) {
    /* AVX2: scalar FMA, so no separate add instruction is needed */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX2;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else if ( (strcmp( i_arch, "knc" ) == 0) ||
              (strcmp( i_arch, "knl" ) == 0) ||
              (strcmp( i_arch, "knm" ) == 0) ||
              (strcmp( i_arch, "skx" ) == 0) ||
              (strcmp( i_arch, "icl" ) == 0) ) {
    if ((strcmp( i_arch, "knc" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_IMCI;
#if !defined(NDEBUG)
      fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_scalar, IMCI redirecting to fullvector!\n");
#endif
      /* NOTE(review): after this redirect to the fullvector config, control
       * falls through and the assignments below overwrite it with the scalar
       * settings (vector_name 'x', vector_length 1, etc.) — confirm upstream
       * whether the redirect is intended to take precedence. */
      libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_xgemm_desc, i_arch, i_use_masking_a_c );
    } else if ((strcmp( i_arch, "knl" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_MIC;
    } else if ((strcmp( i_arch, "knm" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_KNM;
    } else if ((strcmp( i_arch, "skx" ) == 0)) {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_CORE;
    } else {
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX512_ICL;
    }
    /* scalar code uses xmm registers even on AVX-512 targets */
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
    }
  } else {
    /* unknown target: emit a fully disabled configuration */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  }
  /* scalar ALU instructions shared by all targets */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
                                              const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  /* When emitting C source (code_type == 0), append a debug-only,
   * OpenMP-atomic increment of the global flop counter by 2*m*n*k. */
  if ( io_generated_code->code_type == 0 ) {
    char l_buffer[512];
    const unsigned int l_cap = sizeof(l_buffer) - 1;
    int l_len = 0;
    const char* l_fixed_lines[] = {
      "#ifndef NDEBUG\n",
      "#ifdef _OPENMP\n",
      "#pragma omp atomic\n",
      "#endif\n"
    };
    unsigned int l_i;
    /* fixed prologue lines */
    for ( l_i = 0; l_i < sizeof(l_fixed_lines) / sizeof(l_fixed_lines[0]); ++l_i ) {
      l_len = LIBXSMM_SNPRINTF( l_buffer, l_cap, "%s", l_fixed_lines[l_i] );
      libxsmm_append_code_as_string( io_generated_code, l_buffer, l_len );
    }
    /* the actual counter update (2 flops per multiply-add) */
    l_len = LIBXSMM_SNPRINTF( l_buffer, l_cap, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k );
    libxsmm_append_code_as_string( io_generated_code, l_buffer, l_len );
    /* close the NDEBUG guard */
    l_len = LIBXSMM_SNPRINTF( l_buffer, l_cap, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_buffer, l_len );
  }
}
LIBXSMM_API_INTERN
/* Emits the k-loop header: zero the k-loop counter, plant the backward-jump
 * label, then bump the counter by i_k_blocking at the top of each iteration.
 * The matching compare/jump is emitted by the k-loop footer. */
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int i_m_blocking,
                                          const unsigned int i_k_blocking ) {
  LIBXSMM_UNUSED(i_m_blocking);
  /* k-loop counter := 0 */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
  /* loop-start label for the footer's backward jump */
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  /* counter is advanced at iteration start, before the loop body */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
LIBXSMM_API_INTERN
/* Emits the k-loop footer: compare the k-loop counter against
 * i_max_blocked_k and jump back (alu_jmp_instruction, JL by default) while
 * more k iterations remain. When the whole k dimension has been consumed
 * (i_kloop_complete != 0), rewind the B pointer by k*datatype_size so the
 * next m/n iteration starts at the first B column again. */
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor* i_xgemm_desc,
                                          const unsigned int i_m_blocking,
                                          const unsigned int i_max_blocked_k,
                                          const unsigned int i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    /* rewind B to the start of the k dimension */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_b, (i_xgemm_desc->k)*(i_micro_kernel_config->datatype_size) );
  }
}
LIBXSMM_API_INTERN
/* Emits the n-loop header: plant the backward-jump label, advance the n-loop
 * counter by i_n_blocking, and reset the m-loop counter to zero for the
 * nested m loop. */
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int i_n_blocking) {
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
  /* fresh m loop for this n block */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}
LIBXSMM_API_INTERN
/* Emits the n-loop footer: advance the C pointer to the next n block while
 * undoing the m-loop advance (n_blocking*ldc*size - m*size), advance B by
 * n_blocking columns (n_blocking*ldb*size), rewind A by the full m extent
 * (m*size), then compare the n-loop counter against i_n_done and jump back
 * while iterations remain. */
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor* i_xgemm_desc,
                                          const unsigned int i_n_blocking,
                                          const unsigned int i_n_done ) {
  /* C += n_blocking columns, minus the m advance done inside the m loop */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                   (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
                                     (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
#endif
  /* B += n_blocking columns */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                   i_gp_reg_mapping->gp_reg_b, (i_n_blocking*(i_xgemm_desc->ldb)*(i_micro_kernel_config->datatype_size)) );
  /* rewind A to its start for the next n block */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                   i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
/* Emits the m-loop header: plant the backward-jump label and advance the
 * m-loop counter by i_m_blocking at the top of each iteration. */
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int i_m_blocking ) {
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
LIBXSMM_API_INTERN
/* Emits the m-loop footer: advance the C pointer (and B-prefetch pointer for
 * the prefetch strategies that stream B via C) by one m block, then adjust
 * the A pointer (and A-prefetch pointer) depending on whether the k loop was
 * fully unrolled; finally compare the m-loop counter against i_m_done and
 * jump back while iterations remain.
 *
 * A-pointer adjustment: when the k loop was NOT unrolled (i_k_unrolled == 0),
 * the k loop already advanced A by k*lda*size, so A is rewound by
 * k*size*lda - m_blocking*size (net effect: +m_blocking*size). When the
 * k loop WAS unrolled, A was never advanced, so it simply moves forward by
 * m_blocking*size. */
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
                                          libxsmm_loop_label_tracker* io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor* i_xgemm_desc,
                                          const unsigned int i_m_blocking,
                                          const unsigned int i_m_done,
                                          const unsigned int i_k_unrolled ) {
  /* advance C pointer */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                   i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  /* C prefetch */
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                     i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
#endif
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                     i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
  if (i_k_unrolled == 0) {
    /* A prefetch */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
                                       ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
                                       (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
    /* advance A pointer */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
                                     ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  } else {
    /* A prefetch */
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
                                       (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
    /* advance A pointer */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_a,
                                     (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  }
  /* loop handling */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config */
l_m_blocking = i_m_blocking / i_micro_kernel_config->vector_length;
/* start register of accumulator */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Do some test if it's possible to generated the requested code.
This is not done in release mode and therefore bad
things might happen.... HUAAH */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_IMCI ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_MIC ||
( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif /*NDEBUG*/
/* load C accumulator */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
/* let convert the int32 accumulator into a FP32 values */
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
(i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_ICL) ) &&
( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we add when scaling during conversion to FP32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 0 );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C, so let's xout accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
/* Emits the instructions that store the register-blocked C accumulator back
 * to memory. For I16->F32 GEMM on AVX-512 the int32 accumulator is first
 * converted to FP32 and scaled (fused with the C load when Beta=1), then the
 * accumulator registers are written out; finally, optional prefetches of the
 * next C tile are issued (one per cache line in the M direction). */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code*             io_generated_code,
                                     const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                     const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                     const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                     const unsigned int                 i_m_blocking,
                                     const unsigned int                 i_n_blocking )
{
  /* deriving register blocking from kernel config */
  unsigned int l_m_blocking = i_m_blocking/i_micro_kernel_config->vector_length;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  /* start register of accumulator */
  unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);

  /* @TODO fix this test */
#if !defined(NDEBUG)
  /* debug-only sanity check: the requested register blocking must fit the ISA's limits */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX  ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2    ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_IMCI       ||
             i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_MIC ||
             ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) && (i_m_blocking == i_micro_kernel_config->vector_length) ) ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 6) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
  /* M blocking must be a whole number of vector registers */
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif

  /* in case of IGEMM just do some potential conversion to FP */
  /* let convert the int32 accumulator into a FP32 values */
  if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_KNM) ||
         (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_ICL) ) &&
       ( (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
    /* load address of scaling factor from stack */
    libxsmm_x86_instruction_alu_mem( io_generated_code,
        i_micro_kernel_config->alu_mov_instruction,
        LIBXSMM_X86_GP_REG_RSP,
        LIBXSMM_X86_GP_REG_UNDEF, 0,
        48, /* NOTE(review): assumes the scaling-factor pointer sits at RSP+48 — confirm against the kernel prologue */
        i_gp_reg_mapping->gp_reg_help_1,
        0 );
    /* broadcast scaling factor into a vector register */
    libxsmm_x86_instruction_vec_move( io_generated_code,
        i_micro_kernel_config->instruction_set,
        LIBXSMM_X86_INSTR_VBROADCASTSS,
        i_gp_reg_mapping->gp_reg_help_1,
        LIBXSMM_X86_GP_REG_UNDEF, 0,
        0,
        i_micro_kernel_config->vector_name, 0,
        0, 0 );
    /* loop over the accumulator, convert and scale */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        /* convert current accumulator register into FP32 */
        libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
            i_micro_kernel_config->instruction_set,
            LIBXSMM_X86_INSTR_VCVTDQ2PS,
            i_micro_kernel_config->vector_name,
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
            LIBXSMM_X86_VEC_REG_UNDEF );
        /* scale it */
        if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
          /* fused multiply-add: acc = acc * scale + C, i.e. the C load is folded into the scaling */
          libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
              i_micro_kernel_config->instruction_set,
              LIBXSMM_X86_INSTR_VFMADD213PS,
              0,
              i_gp_reg_mapping->gp_reg_c,
              LIBXSMM_X86_GP_REG_UNDEF,
              0,
              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
              i_micro_kernel_config->vector_name,
              0,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n));
        } else {
          /* Beta=0: plain scaling, no C load */
          libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
              i_micro_kernel_config->instruction_set,
              LIBXSMM_X86_INSTR_VMULPS,
              i_micro_kernel_config->vector_name,
              0,
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
        }
      }
    }
  }

  /* storing C accumulator */
  for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
    for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
      libxsmm_x86_instruction_vec_move( io_generated_code,
          i_micro_kernel_config->instruction_set,
          i_micro_kernel_config->c_vmove_instruction,
          i_gp_reg_mapping->gp_reg_c,
          LIBXSMM_X86_GP_REG_UNDEF, 0,
          ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
          i_micro_kernel_config->vector_name,
          l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->use_masking_a_c, 1 );
    }
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C          ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C       ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST) {
      /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
      unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
      for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
        /* NOTE(review): prefetches B-via-C, hence gp_reg_b_prefetch with C-shaped offsets */
        libxsmm_x86_instruction_prefetch( io_generated_code,
            i_micro_kernel_config->prefetch_instruction,
            i_gp_reg_mapping->gp_reg_b_prefetch,
            LIBXSMM_X86_GP_REG_UNDEF, 0,
            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
      }
    }
  }
}
|
smooth_filter_openmp.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
/* Layout constants for the (uncompressed, 24-bit) BMP files this tool handles. */
const size_t BMP_HEADER_SIZE = 54;          /* bytes of header copied verbatim between files */
const size_t BMP_HEADER_WIDTH_OFFSET = 18;  /* byte offset of the 32-bit little-endian width field */
const size_t BMP_HEADER_HEIGHT_OFFSET = 22; /* byte offset of the 32-bit little-endian height field */
const size_t PIXEL_REAL_SIZE = 3;           /* bytes per pixel on disk (B, G, R) */
/* Extracts the image width from a raw BMP header buffer.
 * The width is a 32-bit value at BMP_HEADER_WIDTH_OFFSET; it is read with
 * memcpy instead of the previous `*(const unsigned int*)ptr` cast, which was
 * undefined behavior (misaligned access / strict-aliasing violation) since
 * the offset 18 is not 4-byte aligned.
 * NOTE(review): like the original, this assumes a little-endian host. */
size_t getWidth(const unsigned char * header)
{
    unsigned int value;
    memcpy(&value, header + BMP_HEADER_WIDTH_OFFSET, sizeof value);
    return (size_t)value;
}
/* Extracts the image height from a raw BMP header buffer.
 * Reads the 32-bit value at BMP_HEADER_HEIGHT_OFFSET via memcpy; the previous
 * `*(const unsigned int*)ptr` cast was undefined behavior because offset 22
 * is misaligned for a 4-byte load and violates strict aliasing.
 * NOTE(review): like the original, this assumes a little-endian host. */
size_t getHeight(const unsigned char * header)
{
    unsigned int value;
    memcpy(&value, header + BMP_HEADER_HEIGHT_OFFSET, sizeof value);
    return (size_t)value;
}
/* One pixel in BMP on-disk order: blue, green, red (no alpha, no padding). */
struct RgbPixel
{
    unsigned char b; /* blue channel */
    unsigned char g; /* green channel */
    unsigned char r; /* red channel */
};
/* Allocates a zero-initialized array of `size` pixels; returns NULL on failure.
 * calloc replaces the previous casted malloc: it performs the count*size
 * overflow check itself and needs no cast in C. Caller frees via
 * deallocateMemory(). */
struct RgbPixel * allocateMemory(size_t size)
{
    return calloc(size, sizeof(struct RgbPixel));
}
/* Releases a pixel buffer and nulls the caller's pointer so a later
 * double-free or use-after-free is harmless. */
void deallocateMemory(struct RgbPixel ** data)
{
    struct RgbPixel * victim = *data;
    *data = NULL;
    free(victim);
}
/* Reads a BMP file: fills `header` with the 54-byte header and allocates
 * *bmp (ownership transfers to the caller) with width*height pixels.
 * Returns false — with *bmp freed/NULL and the file closed — on any failure.
 * Fix: the allocation result is now checked before use; previously a failed
 * allocateMemory() led to writing through a NULL pointer.
 * NOTE(review): rows are read without 4-byte padding, matching writeBmp —
 * correct only for widths whose row size is already a multiple of 4. */
bool readBmp(
    const char * filename,
    unsigned char * header,
    struct RgbPixel ** bmp
)
{
    size_t read;
    FILE * fileBmp = fopen(filename, "rb");
    if (!fileBmp)
        return false;
    read = fread(header, sizeof(unsigned char), BMP_HEADER_SIZE, fileBmp);
    if (read != BMP_HEADER_SIZE)
    {
        fclose(fileBmp);
        return false;
    }
    size_t width = getWidth(header), height = getHeight(header);
    size_t size = width * height;
    *bmp = allocateMemory(size);
    if (!*bmp)
    {
        fclose(fileBmp);
        return false;
    }
    for (size_t i = 0; i < size; i++)
    {
        read = fread((*bmp) + i, sizeof(unsigned char), PIXEL_REAL_SIZE, fileBmp);
        if (read != PIXEL_REAL_SIZE)
        {
            deallocateMemory(bmp);
            fclose(fileBmp);
            return false;
        }
    }
    fclose(fileBmp);
    return true;
}
/* Writes `header` plus width*height pixels to a BMP file.
 * Returns false on any I/O failure, with the stream closed.
 * Fixes: the original called fclose(NULL) when fopen failed (undefined
 * behavior), and ignored every fwrite/fclose result, so a full disk went
 * unreported. */
bool writeBmp(
    const char * filename,
    const unsigned char * header,
    struct RgbPixel * bmp
)
{
    FILE * bmpFile = fopen(filename, "wb");
    if (!bmpFile)
        return false; /* nothing to close: fopen failed */
    if (fwrite(header, sizeof(unsigned char), BMP_HEADER_SIZE, bmpFile) != BMP_HEADER_SIZE)
    {
        fclose(bmpFile);
        return false;
    }
    size_t width = getWidth(header), height = getHeight(header);
    size_t size = width * height;
    for (size_t i = 0; i < size; i++)
    {
        if (fwrite(bmp + i, sizeof(unsigned char), PIXEL_REAL_SIZE, bmpFile) != PIXEL_REAL_SIZE)
        {
            fclose(bmpFile);
            return false;
        }
    }
    /* fclose flushes buffered data; a failure here means the file is incomplete */
    return fclose(bmpFile) == 0;
}
void processImageSmoothFilter(struct RgbPixel * bmp, size_t width, size_t height, size_t radius, struct RgbPixel * result)
{
#define ROUND(x) (unsigned char)((x) + 0.5)
#pragma omp parallel for shared(bmp, width, height, radius, result) num_threads(4)
for (size_t i = 0; i < height; i++)
for (size_t j = 0; j < width; j++)
{
unsigned int r = 0, g = 0, b = 0;
size_t
starti = i < radius ? 0 : i - radius,
endi = i + radius >= height ? height - 1 : i + radius,
startj = j < radius ? 0 : j - radius,
endj = j + radius >= width ? width - 1 : j + radius;
size_t count = (endi - starti + 1) * (endj - startj + 1);
for (size_t ii = starti; ii <= endi; ii++)
for (size_t jj = startj; jj <= endj; jj++)
{
size_t position = ii * width + jj;
r += bmp[position].r;
g += bmp[position].g;
b += bmp[position].b;
}
size_t resultPosition = i * width + j;
result[resultPosition].r = ROUND((float)r / count);
result[resultPosition].g = ROUND((float)g / count);
result[resultPosition].b = ROUND((float)b / count);
}
#undef ROUND
}
/* Entry point: reads a BMP, runs the smooth filter 100 times (benchmark
 * loop), writes the result, and prints elapsed wall-clock times.
 * Fixes: the result-buffer allocation is now checked before use, and buffers
 * are released on the write-failure path. */
int main(int argc, char * argv[])
{
    if (argc != 4)
    {
        printf("%s\n", "Usage: <program name> <input bmp file name> <output bmp file name> <radius>");
        return 0;
    }
    const char * inputBmpFileName = argv[1];
    const char * outputBmpFileName = argv[2];
    /* NOTE(review): atoi gives 0 on garbage; radius 0 degenerates to a copy */
    size_t radius = (size_t)atoi(argv[3]);
    unsigned char header[BMP_HEADER_SIZE];
    struct RgbPixel * bmp = NULL;
    time_t totalBegin = time(NULL);
    bool readResult = readBmp(inputBmpFileName, header, &bmp);
    if (!readResult)
    {
        printf("Cannot read bmp file %s\n", inputBmpFileName);
        return 1;
    }
    size_t width = getWidth(header), height = getHeight(header);
    struct RgbPixel * resultBmp = allocateMemory(width * height);
    if (!resultBmp)
    {
        printf("Cannot allocate result buffer\n");
        deallocateMemory(&bmp);
        return 1;
    }
    time_t begin = time(NULL);
    for (int i = 0; i < 100; i++) /* repeated to get a measurable runtime */
        processImageSmoothFilter(bmp, width, height, radius, resultBmp);
    time_t end = time(NULL);
    bool writeResult = writeBmp(outputBmpFileName, header, resultBmp);
    if (!writeResult)
    {
        printf("Cannot write bmp file %s\n", outputBmpFileName);
        deallocateMemory(&bmp);
        deallocateMemory(&resultBmp);
        return 1;
    }
    deallocateMemory(&bmp);
    deallocateMemory(&resultBmp);
    time_t totalEnd = time(NULL);
    printf("Algorithm time: %.2f sec.\n", (double)(end - begin));
    printf("Total time: %.2f sec.\n", (double)(totalEnd - totalBegin));
    return 0;
}
|
packedstream_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/basic/cached_iterator.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
namespace nvbio {
// Primary template of the bit-packing helper used by PackedStream.
// Dispatch happens on the storage word type (last parameter); only the
// explicit specializations below are instantiated — this base is empty.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType, typename ValueType>
struct packer {
};
// Specialization for 32-bit storage words, generic SYMBOL_SIZE.
// Power-of-two symbol widths never straddle a word; otherwise a symbol may
// span two adjacent words and is assembled/split across both.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint32>
{
    // Fetch the SYMBOL_SIZE-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // absolute bit offset of the symbol; its low bits live in word bit_idx/32
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 5u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            const uint32 word = stream[ word_idx ];
            // big-endian layout stores symbol 0 in the most-significant bits
            const uint32 symbol_offset = BIG_ENDIAN_T ? (32u - SYMBOL_SIZE - uint32(bit_idx & 31u)) : uint32(bit_idx & 31u);
            const uint32 symbol = (word >> symbol_offset) & SYMBOL_MASK;
            return Symbol( symbol );
        }
        else
        {
            const uint32 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 31u);
            const uint32 symbol1 = (word1 >> symbol_offset) & SYMBOL_MASK;
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 32u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                const uint32 rem_mask = (1u << rem_bits) - 1u;
                const uint32 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = word2 & rem_mask;
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Overwrite the symbol at index sym_idx with sym (read-modify-write).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 5u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint32 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (32u - SYMBOL_SIZE - uint32(bit_idx & 31u)) : uint32(bit_idx & 31u);
            const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            uint32 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 31u);
            const uint32 symbol1 = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 32u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // the symbol's high rem_bits spill into the next word's low bits
                const uint32 rem_mask = (1u << rem_bits) - 1u;
                uint32 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = uint32(sym & SYMBOL_MASK) >> read_bits;
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Specialization for 64-bit storage words, generic SYMBOL_SIZE.
// Same structure as the uint32 specialization, with 64-bit word indexing
// (bit_idx >> 6) and masks widened to uint64 before shifting.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint64>
{
    // Fetch the SYMBOL_SIZE-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 6u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            const uint64 word = stream[ word_idx ];
            // big-endian layout stores symbol 0 in the most-significant bits
            const uint32 symbol_offset = BIG_ENDIAN_T ? (64u - SYMBOL_SIZE - uint32(bit_idx & 63u)) : uint32(bit_idx & 63u);
            const uint32 symbol = uint32((word >> symbol_offset) & SYMBOL_MASK);
            return Symbol( symbol );
        }
        else
        {
            const uint64 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 63u);
            const uint32 symbol1 = uint32((word1 >> symbol_offset) & SYMBOL_MASK);
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 64u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                const uint64 rem_mask = (uint64(1u) << rem_bits) - 1u;
                const uint64 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = uint32(word2 & rem_mask);
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Overwrite the symbol at index sym_idx with sym (read-modify-write).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 6u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint64 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (64u - SYMBOL_SIZE - uint32(bit_idx & 63u)) : uint32(bit_idx & 63u);
            const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits (mask widened to 64-bit before shifting)
            word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            uint64 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 63);
            const uint64 symbol1 = uint64(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(uint64(SYMBOL_MASK) << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 64u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // the symbol's high rem_bits spill into the next word's low bits
                const uint64 rem_mask = (uint64(1u) << rem_bits) - 1u;
                uint64 word2 = stream[ word_idx+1 ];
                const uint64 symbol2 = uint64(sym & SYMBOL_MASK) >> read_bits;
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Specialization for 8-bit storage words, generic SYMBOL_SIZE.
// Same structure as the uint32 specialization with byte-sized words
// (bit_idx >> 3); intermediate shifts rely on integer promotion to int.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint8>
{
    // Fetch the SYMBOL_SIZE-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint8 SYMBOL_COUNT = uint8(1u) << SYMBOL_SIZE;
        const uint8 SYMBOL_MASK = SYMBOL_COUNT - uint8(1u);
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 3u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            const uint8 word = stream[ word_idx ];
            // big-endian layout stores symbol 0 in the most-significant bits
            const uint8 symbol_offset = BIG_ENDIAN_T ? (8u - SYMBOL_SIZE - uint8(bit_idx & 7u)) : uint8(bit_idx & 7u);
            const uint8 symbol = (word >> symbol_offset) & SYMBOL_MASK;
            return Symbol( symbol );
        }
        else
        {
            const uint8 word1 = stream[ word_idx ];
            const uint8 symbol_offset = uint8(bit_idx & 7u);
            const uint8 symbol1 = (word1 >> symbol_offset) & SYMBOL_MASK;
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 8u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                const uint8 rem_mask = uint8((1u << rem_bits) - 1u);
                const uint8 word2 = stream[ word_idx+1 ];
                const uint8 symbol2 = word2 & rem_mask;
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Overwrite the symbol at index sym_idx with sym (read-modify-write).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint8 SYMBOL_COUNT = uint8(1u) << SYMBOL_SIZE;
        const uint8 SYMBOL_MASK = SYMBOL_COUNT - uint8(1u);
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 3u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint8 word = stream[ word_idx ];
            const uint8 symbol_offset = BIG_ENDIAN_T ? (8u - SYMBOL_SIZE - uint8(bit_idx & 7u)) : uint8(bit_idx & 7u);
            const uint8 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            uint8 word1 = stream[ word_idx ];
            const uint8 symbol_offset = uint8(bit_idx & 7u);
            const uint8 symbol1 = uint8(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 8u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // the symbol's high rem_bits spill into the next byte's low bits
                uint8 word2 = stream[ word_idx+1 ];
                const uint8 symbol2 = uint32(sym & SYMBOL_MASK) >> read_bits;
                const uint8 rem_mask = uint8((1u << rem_bits) - 1u);
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Fast-path specialization: 2-bit symbols, 16 per 32-bit word.
// Big-endian layout keeps symbol 0 in the most-significant bit pair.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint32>
{
    // Fetch the 2-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint32 mask    = 3u;
        const uint32 bit_pos = uint32(sym_idx & 15u) << 1;                // bit position within the word
        const uint32 shift   = BIG_ENDIAN_T ? 30u - bit_pos : bit_pos;
        const index_type word = index_type( sym_idx >> 4u );              // 16 symbols per word
        return Symbol( (stream[ word ] >> shift) & mask );
    }
    // Overwrite the 2-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint32 mask    = 3u;
        const uint32 bit_pos = uint32(sym_idx & 15u) << 1;
        const uint32 shift   = BIG_ENDIAN_T ? 30u - bit_pos : bit_pos;
        const index_type word = index_type( sym_idx >> 4u );
        // read-modify-write: clear the slot, then or in the new symbol
        const uint32 cleared = stream[ word ] & ~(mask << shift);
        stream[ word ] = cleared | (uint32(sym & mask) << shift);
    }
};
// Fast-path specialization: 4-bit symbols, 8 per 32-bit word.
// Big-endian layout keeps symbol 0 in the most-significant nibble.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint32>
{
    // Fetch the 4-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint32 mask    = 15u;
        const uint32 bit_pos = uint32(sym_idx & 7u) << 2;                 // bit position within the word
        const uint32 shift   = BIG_ENDIAN_T ? 28u - bit_pos : bit_pos;
        const index_type word = index_type( sym_idx >> 3u );              // 8 symbols per word
        return Symbol( (stream[ word ] >> shift) & mask );
    }
    // Overwrite the 4-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint32 mask    = 15u;
        const uint32 bit_pos = uint32(sym_idx & 7u) << 2;
        const uint32 shift   = BIG_ENDIAN_T ? 28u - bit_pos : bit_pos;
        const index_type word = index_type( sym_idx >> 3u );
        // read-modify-write: clear the slot, then or in the new symbol
        const uint32 cleared = stream[ word ] & ~(mask << shift);
        stream[ word ] = cleared | (uint32(sym & mask) << shift);
    }
};
// Fast-path specialization: 2-bit symbols in uint4 (4x32-bit) words,
// 64 symbols per word; symbol_comp selects the 32-bit lane via comp()/select().
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint4>
{
    // Fetch the 2-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 6u;
        const uint4 word = stream[ word_idx ];
        // which of the four 32-bit components holds the symbol
        const uint32 symbol_comp   = (sym_idx & 63u) >> 4u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Overwrite the 2-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 6u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp   = (sym_idx & 63u) >> 4u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits (write the whole 128-bit word back)
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 4-bit symbols in uint4 (4x32-bit) words,
// 32 symbols per word; symbol_comp selects the 32-bit lane via comp()/select().
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint4>
{
    // Fetch the 4-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        const uint4 word = stream[ word_idx ];
        // which of the four 32-bit components holds the symbol
        const uint32 symbol_comp   = (sym_idx & 31u) >> 3u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Overwrite the 4-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp   = (sym_idx & 31u) >> 3u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits (write the whole 128-bit word back)
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 8-bit symbols in uint4 (4x32-bit) words,
// 16 symbols per word; symbol_comp selects the 32-bit lane via comp()/select().
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,8u,Symbol,InputStream,IndexType,uint4>
{
    // Fetch the 8-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 255u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        const uint4 word = stream[ word_idx ];
        // which of the four 32-bit components holds the symbol
        const uint32 symbol_comp   = (sym_idx & 15u) >> 2u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (24u - (uint32(sym_idx & 3u) << 3)) : uint32((sym_idx & 3u) << 3);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Overwrite the 8-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 255u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp   = (sym_idx & 15u) >> 2u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (24u - (uint32(sym_idx & 3u) << 3)) : uint32((sym_idx & 3u) << 3);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits (write the whole 128-bit word back)
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 2-bit symbols, 32 per 64-bit word.
// Note: masks are widened to uint64 before shifting (offsets reach 62).
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint64>
{
    // Fetch the 2-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        const uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (62u - (uint32(sym_idx & 31u) << 1)) : uint32((sym_idx & 31u) << 1);
        const uint64 symbol = (word >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Overwrite the 2-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (62u - (uint32(sym_idx & 31u) << 1)) : uint32((sym_idx & 31u) << 1);
        const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits (64-bit mask: offset may exceed 31)
        word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
        // set bits
        stream[ word_idx ] = word | symbol;
    }
};
// Fast-path specialization: 4-bit symbols, 16 per 64-bit word.
// Fixes vs. the previous version:
//  - the word index was computed as sym_idx >> 5u (32 symbols/word, copied
//    from the 2-bit case) although the in-word offset uses (sym_idx & 15u)
//    with base 60 — a 64-bit word holds 16 4-bit symbols, so it must be >> 4u;
//  - in set_symbol, a 32-bit value was shifted by offsets up to 60 bits,
//    which is undefined behavior; operands are now widened to uint64 first,
//    matching the 2-bit/uint64 specialization above.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint64>
{
    // Fetch the 4-bit symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        const uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (60u - (uint32(sym_idx & 15u) << 2)) : uint32((sym_idx & 15u) << 2);
        const uint64 symbol = (word >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Overwrite the 4-bit symbol at index sym_idx with sym.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (60u - (uint32(sym_idx & 15u) << 2)) : uint32((sym_idx & 15u) << 2);
        const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits (64-bit mask: offset may exceed 31)
        word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
        // set bits
        stream[ word_idx ] = word | symbol;
    }
};
// fetch the symbol at position sym_idx (relative to this stream's base offset)
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::get(const index_type sym_idx) const
{
// delegate to the packer specialization selected by the storage word type
return packer<BIG_ENDIAN_T, SYMBOL_SIZE,Symbol,InputStream,IndexType,storage_type>::get_symbol( m_stream, sym_idx + m_index );
}
// store the symbol sym at position sym_idx (relative to this stream's base offset)
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::set(const index_type sym_idx, const Symbol sym)
{
    // delegate to the packer specialization selected by the storage word type
    typedef packer<BIG_ENDIAN_T, SYMBOL_SIZE,Symbol,InputStream,IndexType,storage_type> packer_type;
    packer_type::set_symbol( m_stream, sym_idx + m_index, sym );
}
// pre-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ ()
{
// advance the symbol index and return the updated iterator
++m_index;
return *this;
}
// post-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ (int dummy)
{
// return a copy of the iterator as it was before the increment
This r( m_stream, m_index );
++m_index;
return r;
}
// pre-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- ()
{
// step the symbol index back and return the updated iterator
--m_index;
return *this;
}
// post-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- (int dummy)
{
// return a copy of the iterator as it was before the decrement
This r( m_stream, m_index );
--m_index;
return r;
}
// add offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+= (const sindex_type distance)
{
// advance the symbol index by a (possibly negative) signed distance
m_index += distance;
return *this;
}
// subtract offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-= (const sindex_type distance)
{
// move the symbol index back by a (possibly negative) signed distance
m_index -= distance;
return *this;
}
// add offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+ (const sindex_type distance) const
{
// build a new iterator over the same stream at the shifted index
return This( m_stream, m_index + distance );
}
// subtract offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const sindex_type distance) const
{
// build a new iterator over the same stream at the shifted index
return This( m_stream, m_index - distance );
}
// difference
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
typename PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::sindex_type
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const PackedStream it) const
{
// signed distance between two iterators, in symbols (streams assumed equal)
return sindex_type( m_index - it.m_index );
}
// assignment operator
//
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>& PackedStreamRef<Stream>::operator= (const PackedStreamRef& ref)
{
// copy the symbol VALUE, not the reference: read ref, then assign through *this
return (*this = Symbol( ref ));
}
// assignment operator
//
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>& PackedStreamRef<Stream>::operator= (const Symbol s)
{
// write-through: store the symbol at the position the wrapped stream points to
m_stream.set( s );
return *this;
}
// conversion operator
//
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>::operator Symbol() const
{
// read-through: fetch the symbol at the position the wrapped stream points to
return m_stream.get();
}
/// less than
///
// less-than: ordering is defined solely by the symbol index
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator< (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    return lhs.index() < rhs.index();
}
/// greater than
///
// greater-than: ordering is defined solely by the symbol index
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator> (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    // lhs > rhs  <=>  rhs's index is smaller
    return rhs.index() < lhs.index();
}
/// equality test
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (
const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
// equal only when both the underlying stream and the symbol index match
return it1.stream() == it2.stream() && it1.index() == it2.index();
}
/// inequality test
///
// inequality: differs in either the underlying stream or the symbol index
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
    // same operators and short-circuit order as before, guard-clause shape
    if (it1.stream() != it2.stream())
        return true;
    return it1.index() != it2.index();
}
// forward_packer: helper for ForwardPackedStream that caches the current
// storage word so that sequential traversal needs one memory fetch per word
// rather than one per symbol. For this generic version, m_word_offset is the
// symbol's BIT offset inside the cached word.
template <bool BIG_ENDIAN, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType, typename ValueType>
struct forward_packer
{
typedef ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE,BIG_ENDIAN,IndexType> forward_stream_type;
static const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
static const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
static const uint32 WORD_SIZE = 8u * uint32( sizeof(ValueType) );
static const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
// recompute the cached word index / bit offset / word from it.m_index
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
static void rebase(forward_stream_type& it)
{
const uint32 symbol_idx = it.m_index & (SYMBOLS_PER_WORD-1);
it.m_word_index = it.m_index / SYMBOLS_PER_WORD;
it.m_word_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - symbol_idx * SYMBOL_SIZE) : symbol_idx * SYMBOL_SIZE;
it.m_word = it.m_stream[ it.m_word_index ];
}
// advance one symbol, fetching the next word only at a word boundary
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
static void next(forward_stream_type& it)
{
it.m_index++;
if (BIG_ENDIAN)
{
// big-endian packs the first symbol in the HIGH bits: moving forward
// means decreasing the bit offset
if (it.m_word_offset > 0)
it.m_word_offset -= SYMBOL_SIZE;
else
{
// need a new word
++it.m_word_index;
it.m_word = it.m_stream[ it.m_word_index ];
it.m_word_offset = WORD_SIZE - SYMBOL_SIZE;
}
}
else
{
if (it.m_word_offset < WORD_SIZE - SYMBOL_SIZE)
it.m_word_offset += SYMBOL_SIZE;
else
{
// need a new word
++it.m_word_index;
it.m_word = it.m_stream[ it.m_word_index ];
it.m_word_offset = 0;
}
}
}
// step back one symbol, fetching the previous word only at a word boundary
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
static void prev(forward_stream_type& it)
{
it.m_index--;
if (BIG_ENDIAN)
{
if (it.m_word_offset < WORD_SIZE - SYMBOL_SIZE)
it.m_word_offset += SYMBOL_SIZE;
else
{
// need a new word
--it.m_word_index;
it.m_word = it.m_stream[ it.m_word_index ];
it.m_word_offset = 0u;
}
}
else
{
if (it.m_word_offset > 0)
it.m_word_offset -= SYMBOL_SIZE;
else
{
// need a new word
--it.m_word_index;
it.m_word = it.m_stream[ it.m_word_index ];
it.m_word_offset = WORD_SIZE - SYMBOL_SIZE;
}
}
}
// extract the current symbol from the cached word (no memory access)
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
static Symbol fetch(const forward_stream_type& it)
{
return Symbol( (it.m_word >> it.m_word_offset) & SYMBOL_MASK );
}
};
// forward_packer specialization for uint4 (128-bit) storage words.
// NOTE: unlike the generic version, m_word_offset here counts SYMBOLS
// (not bits) within the cached 128-bit word; fetch() resolves which 32-bit
// component and which bit lane to read.
template <bool BIG_ENDIAN, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct forward_packer<BIG_ENDIAN, SYMBOL_SIZE, Symbol, InputStream, IndexType, uint4>
{
    typedef ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE,BIG_ENDIAN,IndexType> forward_stream_type;

    static const uint32 SYMBOL_COUNT        = 1u << SYMBOL_SIZE;
    static const uint32 SYMBOL_MASK         = SYMBOL_COUNT - 1u;
    static const uint32 WORD_SIZE           = 128;
    static const uint32 SUBWORD_SIZE        = 32;
    static const uint32 SYMBOLS_PER_WORD    = WORD_SIZE / SYMBOL_SIZE;
    static const uint32 SYMBOLS_PER_SUBWORD = SUBWORD_SIZE / SYMBOL_SIZE;

    // recompute the cached word index / symbol offset / word from it.m_index
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void rebase(forward_stream_type& it)
    {
        // symbol position within the 128-bit word; reuse it for the offset
        // (the original computed the same expression twice and left this
        // local unused)
        const uint32 symbol_idx = it.m_index & (SYMBOLS_PER_WORD-1);
        it.m_word_index  = it.m_index / SYMBOLS_PER_WORD;
        it.m_word_offset = symbol_idx;
        it.m_word        = it.m_stream[ it.m_word_index ];
    }
    // advance one symbol, fetching the next 128-bit word at a word boundary
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void next(forward_stream_type& it)
    {
        it.m_index++;
        if (it.m_word_offset < SYMBOLS_PER_WORD-1)
            it.m_word_offset++;
        else
        {
            // need a new word
            ++it.m_word_index;
            it.m_word = it.m_stream[ it.m_word_index ];
            it.m_word_offset = 0;
        }
    }
    // step back one symbol, fetching the previous 128-bit word at a word boundary
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void prev(forward_stream_type& it)
    {
        it.m_index--;
        if (it.m_word_offset > 0)
            it.m_word_offset--;
        else
        {
            // need a new word
            --it.m_word_index;
            it.m_word = it.m_stream[ it.m_word_index ];
            it.m_word_offset = SYMBOLS_PER_WORD - 1u;
        }
    }
    // extract the current symbol from the cached word (no memory access)
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static Symbol fetch(const forward_stream_type& it)
    {
        // select the 32-bit component holding the symbol, then the bit lane
        const uint32 word_comp   = comp( it.m_word, it.m_word_offset / SYMBOLS_PER_SUBWORD );
        const uint32 word_mod    = it.m_word_offset & (SYMBOLS_PER_SUBWORD-1);
        const uint32 word_offset = BIG_ENDIAN ? (SUBWORD_SIZE - SYMBOL_SIZE - word_mod * SYMBOL_SIZE) :
                                                (word_mod * SYMBOL_SIZE);
        return Symbol( (word_comp >> word_offset) & SYMBOL_MASK );
    }
};
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
Symbol ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::get() const
{
// read the current symbol from the cached word (no stream access)
return forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,storage_type>::fetch( *this );
}
// rebase the iterator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::rebase(void)
{
// resynchronize the cached word/offset state with m_index
// (required after any non-sequential change to the position)
forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::rebase(*this);
}
// pre-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ ()
{
// advance one symbol, keeping the cached word state in sync
forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::next(*this);
return *this;
}
// post-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ (int dummy)
{
// NOTE(review): the returned copy is reconstructed from (stream, index) only;
// this is valid only if the This(stream, index) constructor rebases the
// cached word state -- confirm against the class definition
This r( m_stream, m_index );
forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::next(*this);
return r;
}
// pre-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- ()
{
// step back one symbol, keeping the cached word state in sync
forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::prev(*this);
return *this;
}
// post-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- (int dummy)
{
// NOTE(review): the returned copy is reconstructed from (stream, index) only;
// valid only if the This(stream, index) constructor rebases the cached word
This r( m_stream, m_index );
forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::prev(*this);
return r;
}
// add offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+= (const sindex_type distance)
{
// random jump: move the index, then rebase the cached word state
m_index += distance;
rebase();
return *this;
}
// subtract offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-= (const sindex_type distance)
{
// random jump: move the index, then rebase the cached word state
m_index -= distance;
rebase();
return *this;
}
// add offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+ (const sindex_type distance) const
{
// NOTE(review): relies on the This(stream, index) constructor rebasing the
// cached word state for the new position -- confirm against the class definition
return This( m_stream, m_index + distance );
}
// subtract offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const sindex_type distance) const
{
// NOTE(review): relies on the This(stream, index) constructor rebasing the
// cached word state for the new position -- confirm against the class definition
return This( m_stream, m_index - distance );
}
// difference
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
typename ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::sindex_type
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const ForwardPackedStream it) const
{
// signed distance between two iterators, in symbols (streams assumed equal)
return sindex_type( m_index - it.m_index );
}
/// less than
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator< (
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
// ordering is defined solely by the symbol index
return it1.index() < it2.index();
}
/// greater than
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator> (
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
// ordering is defined solely by the symbol index
return it1.index() > it2.index();
}
/// equality test
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
// equal only when both the underlying stream and the symbol index match
return it1.stream() == it2.stream() && it1.index() == it2.index();
}
/// inequality test
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it1,
const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& it2)
{
// unequal when either the underlying stream or the symbol index differs
return it1.stream() != it2.stream() || it1.index() != it2.index();
}
namespace priv {
// assign a sequence to a packed stream
//
// assign a sequence to a packed stream
//
// Copies input_len symbols from input_string into packed_string, packing
// SYMBOL_SIZE_T-bit symbols into storage words. The destination may start
// mid-word: the first (partial) word is fetched, patched and written back,
// while every subsequent word is fully rebuilt and overwritten.
//
// \param input_len      number of symbols to copy
// \param input_string   input symbol iterator (random access)
// \param packed_string  destination packed stream (its index() gives the start offset)
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void serial_assign(
    const IndexType input_len,
    InputIterator   input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
    typedef typename packed_stream_type::storage_type word_type;

    const uint32 WORD_SIZE        = uint32( 8u * sizeof(word_type) );
    const bool   BIG_ENDIAN       = BIG_ENDIAN_T;
    const uint32 SYMBOL_SIZE      = SYMBOL_SIZE_T;
    const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
    const uint32 SYMBOL_COUNT     = 1u << SYMBOL_SIZE;
    const uint32 SYMBOL_MASK      = SYMBOL_COUNT - 1u;

    InputStream words = packed_string.stream();

    const IndexType stream_offset = packed_string.index();
    const uint32    word_offset   = stream_offset & (SYMBOLS_PER_WORD-1);
          uint32    word_rem      = 0;

    if (word_offset)
    {
        // compute how many symbols we still need to encode to fill the current word
        word_rem = SYMBOLS_PER_WORD - word_offset;

        // clamp to the input length: the original looped to word_rem
        // unconditionally, reading past the end of input_string (and
        // clobbering symbols past the assigned range) when input_len < word_rem
        const uint32 n_prefix = (input_len < IndexType( word_rem )) ?
            uint32( input_len ) : word_rem;

        // fetch the word in question
        word_type word = words[ stream_offset / SYMBOLS_PER_WORD ];

        // loop through the word's bp's
        for (uint32 i = 0; i < n_prefix; ++i)
        {
            // fetch the bp
            const uint8 bp = input_string[i] & SYMBOL_MASK;

            const uint32 bit_idx       = (word_offset + i) * SYMBOL_SIZE;
            const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
            const word_type symbol     = word_type(bp) << symbol_offset;

            // clear all bits
            word &= ~(word_type(SYMBOL_MASK) << symbol_offset);

            // set bits
            word |= symbol;
        }

        // write out the word
        words[ stream_offset / SYMBOLS_PER_WORD ] = word;
    }

#if defined(_OPENMP) && !defined(NVBIO_DEVICE_COMPILATION)
    // we use this solution because the 'if' clause in the 'pragma omp for' results in 30% slowdown
    // when the if is not taken and the loop is executed serially
    if (input_len > 1000000)
    {
        #pragma omp parallel for
        for (int64 i = word_rem; i < int64( input_len ); i += SYMBOLS_PER_WORD)
        {
            // encode a word's worth of characters
            word_type word = 0u;

            const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );

            // loop through the word's bp's
            for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
            {
                if (j < n_symbols)
                {
                    // fetch the bp
                    const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;

                    const uint32 bit_idx       = j * SYMBOL_SIZE;
                    const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
                    const word_type symbol     = word_type(bp) << symbol_offset;

                    // set bits
                    word |= symbol;
                }
            }

            // write out the word
            const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
            words[ word_idx ] = word;
        }
    }
    else
#endif
    {
        for (IndexType i = word_rem; i < input_len; i += SYMBOLS_PER_WORD)
        {
            // encode a word's worth of characters
            word_type word = 0u;

            const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );

            // get the offset to the first symbol (walked incrementally below
            // instead of recomputing bit_idx per symbol)
            uint32 symbol_offset = BIG_ENDIAN ? WORD_SIZE - SYMBOL_SIZE : 0u;

            // loop through the word's bp's
            for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
            {
                if (j < n_symbols)
                {
                    // fetch the bp
                    const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;

                    const word_type symbol = word_type(bp) << symbol_offset;

                    // set bits
                    word |= symbol;

                    // move the offset
                    if (BIG_ENDIAN) symbol_offset -= SYMBOL_SIZE;
                    else            symbol_offset += SYMBOL_SIZE;
                }
            }

            // write out the word
            const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
            words[ word_idx ] = word;
        }
    }
}
} // namespace priv
#if defined(__CUDACC__)
namespace priv {
// assign a sequence to a packed stream (one thread per output word)
//
// Thread 0 patches the possibly-partial first word; thread t > 0 rebuilds
// and overwrites word t of the destination range.
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
__global__
void assign_kernel(
    const IndexType     input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
    typedef typename packed_stream_type::storage_type word_type;

    const uint32 WORD_SIZE        = uint32( 8u * sizeof(word_type) );
    const bool   BIG_ENDIAN       = BIG_ENDIAN_T;
    const uint32 SYMBOL_SIZE      = SYMBOL_SIZE_T;
    const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
    const uint32 SYMBOL_COUNT     = 1u << SYMBOL_SIZE;
    const uint32 SYMBOL_MASK      = SYMBOL_COUNT - 1u;

    const IndexType stream_offset = packed_string.index();                  // stream offset, in symbols
    const uint32    word_offset   = stream_offset & (SYMBOLS_PER_WORD-1);   // offset within the first word
    const uint32    word_rem      = SYMBOLS_PER_WORD - word_offset;         // # of remaining symbols to fill the first word

    InputStream words = packed_string.stream();

    const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;

    if (thread_id == 0)
    {
        // clamp to the input length: the original looped to word_rem
        // unconditionally, reading past the end of input_string (and
        // clobbering symbols past the assigned range) when input_len < word_rem
        const uint32 n_prefix = (input_len < IndexType( word_rem )) ?
            uint32( input_len ) : word_rem;

        // fetch the word in question
        word_type word = words[ stream_offset / SYMBOLS_PER_WORD ];

        // loop through the word's bp's
        for (uint32 i = 0; i < n_prefix; ++i)
        {
            // fetch the bp
            const uint8 bp = input_string[i] & SYMBOL_MASK;

            const uint32 bit_idx       = (word_offset + i) * SYMBOL_SIZE;
            const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
            const word_type symbol     = word_type(bp) << symbol_offset;

            // clear all bits (use word_type, consistent with the serial path;
            // the previous uint64 cast was needlessly type-specific)
            word &= ~(word_type(SYMBOL_MASK) << symbol_offset);

            // set bits
            word |= symbol;
        }

        // write out the word
        words[ stream_offset / SYMBOLS_PER_WORD ] = word;
    }
    else
    {
        // check whether this thread should do something
        if (word_rem + (thread_id - 1u) * SYMBOLS_PER_WORD >= input_len)
            return;

        const uint32 i = word_rem + (thread_id - 1u) * SYMBOLS_PER_WORD;

        // encode a word's worth of characters
        word_type word = 0u;

        const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );

        // loop through the word's bp's
        for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
        {
            if (j < n_symbols)
            {
                // fetch the bp
                const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;

                const uint32 bit_idx       = j * SYMBOL_SIZE;
                const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
                const word_type symbol     = word_type(bp) << symbol_offset;

                // set bits
                word |= symbol;
            }
        }

        // write out the word
        const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
        words[ word_idx ] = word;
    }
}
// assign a sequence to a packed stream
// NOTE: this is a host ONLY function - marking it as host/device would cause compiler misbehaviours
//
// assign a sequence to a packed stream
// NOTE: this is a host ONLY function - marking it as host/device would cause compiler misbehaviours
//
// launches assign_kernel with one thread per destination word
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
void device_assign(
const IndexType input_len,
const InputIterator input_string,
PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
// nothing to do for an empty input
if (input_len == 0)
return;
typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
typedef typename packed_stream_type::storage_type word_type;
const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) );
const uint32 SYMBOL_SIZE = SYMBOL_SIZE_T;
const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
const IndexType stream_offset = packed_string.index(); // stream offset, in symbols
// compute the destination word range so the grid covers one thread per word
// NOTE(review): word_begin/word_end are uint32 while stream_offset is
// IndexType -- possible narrowing for very long streams; confirm intended
const uint32 word_begin = util::divide_rz( stream_offset, SYMBOLS_PER_WORD );
const uint32 word_end = util::divide_ri( stream_offset + input_len, SYMBOLS_PER_WORD );
const uint32 n_words = word_end - word_begin;
const uint32 blockdim = 128u;
const uint32 n_blocks = util::divide_ri( n_words, blockdim );
priv::assign_kernel<<<n_blocks,blockdim>>>( input_len, input_string, packed_string );
cuda::check_error("assign_kernel()");
}
} // namespace priv
// assign a sequence to a packed stream
//
// assign a sequence to a packed stream residing in device memory
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
const device_tag tag,
const IndexType input_len,
const InputIterator input_string,
PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
#if !defined(NVBIO_DEVICE_COMPILATION)
//
// this function is being called on the host: spawn a kernel
//
priv::device_assign( input_len, input_string, packed_string );
#else
//
// this function is being called on the device: call the serial implementation
//
priv::serial_assign( input_len, input_string, packed_string );
#endif
}
#endif // defined(__CUDACC__)
// assign a sequence to a packed stream
//
// assign a sequence to a packed stream residing in host memory
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
const host_tag tag,
const IndexType input_len,
const InputIterator input_string,
PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
// host memory: always use the serial (possibly OpenMP-parallel) path
priv::serial_assign( input_len, input_string, packed_string );
}
// assign a sequence to a packed stream
//
// assign a sequence to a packed stream, dispatching on the memory space of
// the destination stream
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
const IndexType input_len,
const InputIterator input_string,
PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
// find the system tag of the output packed stream
typedef typename iterator_system<InputStream>::type system_tag;
// and chose which function to call based on it
assign( system_tag(), input_len, input_string, packed_string );
}
//
// A utility function to transpose a set of packed input streams:
// the symbols of the i-th input stream is supposed to be stored contiguously in the range [offset(i), offset + N(i)]
// the *words* of i-th output stream will be stored in strided fashion at out_stream[tid, tid + (N(i)+symbols_per_word-1/symbols_per_word) * stride]
//
// \param stride output stride
// \param N length of this thread's string in the input stream
// \param in_offset offset of this thread's string in the input stream
// \param in_stream input stream
// \param out_stream output stream
//
template <uint32 BLOCKDIM, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename InStreamIterator, typename OutStreamIterator>
NVBIO_HOST_DEVICE
void transpose_packed_streams(const uint32 stride, const uint32 N, const uint32 in_offset, const InStreamIterator in_stream, OutStreamIterator out_stream)
{
typedef typename std::iterator_traits<InStreamIterator>::value_type word_type;
const uint32 SYMBOLS_PER_WORD = (sizeof(word_type)*8) / SYMBOL_SIZE;
// the input string may start mid-word: each output word is spliced from
// two consecutive input words
uint32 word_offset = in_offset & (SYMBOLS_PER_WORD-1);
uint32 begin_word = in_offset / SYMBOLS_PER_WORD;
uint32 end_word = (in_offset + N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
// write out the output symbols
const uint32 N_words = (N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
word_type cur_word = in_stream[begin_word+0];
for (uint32 w = 0; w < N_words; ++w)
{
if (BIG_ENDIAN == false)
{
// fill the first part of the output word
word_type out_word = cur_word >> (word_offset*SYMBOL_SIZE);
// fetch the next word
cur_word = begin_word+w+1 < end_word ? in_stream[begin_word+w+1] : 0u;
// fill the second part of the output word
// (guarded: when word_offset == 0 the shift below would equal the
// word width, which is undefined behavior)
if (word_offset)
out_word |= cur_word << ((SYMBOLS_PER_WORD - word_offset)*SYMBOL_SIZE);
// the output words are strided: word w of this string goes to out_stream[stride*w]
out_stream[ stride*w ] = out_word;
}
else
{
// fill the first part of the output word
word_type out_word = cur_word << (word_offset*SYMBOL_SIZE);
// fetch the next word
cur_word = begin_word+w+1 < end_word ? in_stream[begin_word+w+1] : 0u;
// fill the second part of the output word
// (guarded: avoids an undefined full-width shift when word_offset == 0)
if (word_offset)
out_word |= cur_word >> ((SYMBOLS_PER_WORD - word_offset)*SYMBOL_SIZE);
out_stream[ stride*w ] = out_word;
}
}
}
} // namespace nvbio
|
GB_unaryop__lnot_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int64
// op(A') function: GB_tran__lnot_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) !(Ax [p] != 0) for all p in [0, anz).
// Generated code: the per-entry work is expanded from the GB_CAST_OP /
// GB_GETA / GB_CASTING / GB_OP macros defined earlier in this file.
GrB_Info GB_unop__lnot_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
// embarrassingly parallel: each entry is independent
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast int64_t -> float, and apply
// the logical-not unary operator. The body is the shared transpose template
// GB_unaryop_transpose.c, specialized by this file's macros.
GrB_Info GB_tran__lnot_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64)
// A*D function (colscale): GB (_AxD__rdiv_uint64)
// D*A function (rowscale): GB (_DxB__rdiv_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64)
// C=scalar+B GB (_bind1st__rdiv_uint64)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint64)
// C=A+scalar GB (_bind2nd__rdiv_uint64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64)
// A, B, and C entry types for the RDIV_UINT64 kernels
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used.  NOTE: no trailing backslash here;
// a stray line continuation would splice the next line into the macro.
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used (same caution as GB_A_IS_PATTERN)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: rdiv means z = y/x, so the arguments are swapped
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; no failure modes, so the
// kernel returns void.  The loop itself lives in the included template.
void GB (_Cdense_ewise3_accum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; overwrites C instead of
// accumulating.  The loop itself lives in the included template.
void GB (_Cdense_ewise3_noaccum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// task partition in B_ek_slicing (B_ntasks tasks over B_nthreads threads).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b (passed type-erased via p_bwork)
// into every entry of the dense matrix C.  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumb__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point; the original had an unreachable duplicate return
// inside the block above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// task partition in A_ek_slicing.
GrB_Info GB (_AxD__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// value array of C, written by the template below
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// value array of C, written by the template below
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with the mask interpretation
// chosen by Mask_struct/Mask_comp.  When is_eWiseUnion is true, the
// alpha/beta scalars supply the value paired with an entry that exists in
// only one of A or B.  NOTE(review): when is_eWiseUnion is false,
// alpha_scalar/beta_scalar stay uninitialized — presumably the included
// template only reads them in the eWiseUnion case; confirm in
// GB_add_template.c.
GrB_Info GB (_AaddB__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse; work is partitioned by TaskList.
GrB_Info GB (_AemultB_08__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  flipxy only matters when the operator lacks a flipped
// variant (GB_BINOP_FLIP); for rdiv it is 0, so the unflipped path below
// is the one that gets compiled.
GrB_Info GB (_AemultB_02__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; work is partitioned over M via M_ek_slicing.
GrB_Info GB (_AemultB_04__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__rdiv_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rdiv (x, Bx [p]) for every entry present in B; entries absent
// from the bitmap Bb are skipped.  All pointers arrive type-erased and are
// reinterpreted as uint64_t here.
GrB_Info GB (_bind1st__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t *Cx = (uint64_t *) Cx_output ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// only operate on entries present in B's bitmap
if (GBB (Bb, pB))
{
uint64_t bij = GBX (Bx, pB, false) ;
Cx [pB] = GB_IDIV_UNSIGNED (bij, x, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y) for every entry present in A; entries absent
// from the bitmap Ab are skipped.  All pointers arrive type-erased and are
// reinterpreted as uint64_t here.
GrB_Info GB (_bind2nd__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((uint64_t *) y_input)) ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t *Cx = (uint64_t *) Cx_output ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only operate on entries present in A's bitmap
if (GBB (Ab, pA))
{
uint64_t aij = GBX (Ax, pA, false) ;
Cx [pA] = GB_IDIV_UNSIGNED (y, aij, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound as the
// first argument, via the included GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound as the
// second argument, via the included GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zero_mat.c | /*
Zero out a block matrix.
*/
#include <stdlib.h>
#include <stdio.h>
#include "declarations.h"
void zero_mat(A)
struct blockmatrix A;
{
int i,j;
int blk;
/*
* Loop through the blocks, zeroing one at a time.
*/
for (blk=1; blk<=A.nblocks; blk++)
{
/*
* Zero out block i.
*/
switch (A.blocks[blk].blockcategory)
{
case DIAG:
for (i=1; i<=A.blocks[blk].blocksize; i++)
{
A.blocks[blk].data.vec[i]=0.0;
};
break;
case MATRIX:
#pragma omp parallel for schedule(dynamic,64) default(none) shared(A,blk) private(j,i)
for (j=1; j<=A.blocks[blk].blocksize; j++)
for (i=1; i<=A.blocks[blk].blocksize; i++)
A.blocks[blk].data.mat[ijtok(i,j,A.blocks[blk].blocksize)]=0.0;
break;
default:
printf("Illegal block type \n");
exit(12);
};
};
}
|
GB_unaryop__identity_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_bool
// op(A') function: GB_tran__identity_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// A (input) entry type
#define GB_ATYPE \
bool
// C (output) entry type
#define GB_CTYPE \
bool
// declare aij and load it from Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// access the C output array
#define GB_CX(p) Cx [p]
// unary operator: identity (z is just a copy of x)
#define GB_OP(z, x) \
z = x ;
// typecast (bool to bool, a no-op here)
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij)): load, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h to shrink compiled code size)
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0..anz-1] = identity (cast (Ax [0..anz-1])), one entry per
// iteration, statically scheduled over nthreads OpenMP threads.  Returns
// GrB_NO_VALUE when this kernel has been compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// Cx [k] = Ax [k], via the GB_* macros above
GB_CAST_OP (k, k) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All the work happens in the included template, driven by the GB_* macros
// defined above; GB_PHASE_2_OF_2 selects the phase that fills in C's values.
GrB_Info GB_tran__identity_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_thread_num1.c | ///TAFFO_TEST_ARGS -fopenmp
#include <omp.h>
#include <stdio.h>
#define NUM_THREADS 4
// TAFFO precision-tuning test: each of NUM_THREADS OpenMP threads writes
// x*index into its own slot of the shared array, so there are no data
// races on container[].  The __attribute__((annotate(...))) strings drive
// TAFFO's fixed-point conversion and must not be altered.
int main(void)
{
float container[NUM_THREADS] __attribute__((annotate("target('container') scalar(range(0,1) final)")));
int i;
#pragma omp parallel num_threads(NUM_THREADS)
{
// x is declared inside the parallel region, so each thread has its own copy
float x __attribute__((annotate("target('x') scalar(range(0,5) final)"))) = 0.333333;
int index = omp_get_thread_num();
container[index] = x * index;
}
// print the per-thread results in thread-index order
for (i = 0; i < NUM_THREADS; i++)
printf("%f\n", container[i]);
}
|
GB_unaryop__ainv_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_fp32
// op(A') function: GB_tran__ainv_fp64_fp32
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = -aij
// A (input) entry type
#define GB_ATYPE \
float
// C (output) entry type
#define GB_CTYPE \
double
// declare aij and load it from Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the C output array
#define GB_CX(p) Cx [p]
// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
z = -x ;
// typecast from the A type (float) to the C type (double)
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij)): load, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h to shrink compiled code size)
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0..anz-1] = - (double) Ax [0..anz-1], one entry per iteration,
// statically scheduled over nthreads OpenMP threads.  Returns
// GrB_NO_VALUE when this kernel has been compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_fp64_fp32
(
double *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// Cx [k] = -(double) Ax [k], via the GB_* macros above
GB_CAST_OP (k, k) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All the work happens in the included template, driven by the GB_* macros
// defined above; GB_PHASE_2_OF_2 selects the phase that fills in C's values.
GrB_Info GB_tran__ainv_fp64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VolumetricMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c"
#else
// Validate shapes for volumetric max-unpooling: input must be a non-empty
// 4D (C,T,H,W) or 5D (N,C,T,H,W) tensor, indices must match input, the
// strides must be positive, and (when supplied) gradOutput must have the
// expected oT x oH x oW spatial size with the same channel count.
static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
THNN_CHECK_SHAPE_INDICES(input, indices);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
// dimension positions for the unbatched (4D) layout; each is shifted by
// one below when a leading batch dimension is present
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input->dim() == 5)
{
dimt++;
dimw++;
dimh++;
dimn++;
}
int nslices = input->size(dimn);
if (gradOutput != NULL) {
if (oT != gradOutput->size(dimt) || oW != gradOutput->size(dimw) || oH != gradOutput->size(dimh))
{
THError(
"Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
oT, oH, oW, gradOutput->size(dimt), gradOutput->size(dimh), gradOutput->size(dimw)
);
}
THNN_CHECK_DIM_SIZE(gradOutput, input->dim(), dimn, nslices);
}
}
// Unpool one frame: for every input voxel, copy its value to the output
// position recorded in ind_p.  Out-of-range indices are recorded under a
// critical section and reported only after the parallel region, so that
// THError is never raised from inside an OpenMP worker thread.
static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
real *input_p,
real *output_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
int has_error = 0;
THIndex_t error_index = 0;
// each thread owns a disjoint set of channel slices k, so the writes to
// output_p_k do not race with other threads
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *output_p_k = output_p + k * oT * oH * oW;
real *input_p_k = input_p + k * iT * iH * iW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index] - TH_INDEX_BASE; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oW * oH)
{
#pragma omp critical
{
has_error = 1;
error_index = maxp;
}
} else {
output_p_k[maxp] = input_p_k[index]; /* update output */
}
}
}
}
}
// report at most one of the offending indices, outside the parallel region
if (has_error) {
THError(
"found an invalid max index %ld (output volumes are of size %dx%dx%d)",
error_index, oT, oH, oW
);
}
}
// Forward pass: zero-fill output of size oT x oH x oW (per channel) and
// scatter each input value to the position stored in indices.  Handles
// both unbatched (4D) and batched (5D) inputs; the batched case calls the
// per-frame kernel once per sample.
void THNN_(VolumetricMaxUnpooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
real *input_data;
real *output_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, NULL, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
// shift dimension positions when a leading batch dimension is present
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get contiguous input (may allocate copies; freed below) */
input = THTensor_(newContiguous)(input);
indices = THIndexTensor_(newContiguous)(indices);
/* resize output */
if (input->dim() == 4)
{
THTensor_(resize4d)(output, nslices, oT, oH, oW);
// positions not referenced by indices must stay zero
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THIndexTensor_(data)(indices);
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data, output_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW);
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THIndexTensor_(data)(indices);
// one frame per batch sample, offset into the flat data arrays
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateOutput_frame)(
input_data+p*nslices*iT*iW*iH,
output_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup: release the contiguous copies */
THTensor_(free)(input);
THIndexTensor_(free)(indices);
}
// Backward pass for one frame: each input voxel's gradient is gathered
// from the output position recorded in ind_p.
// NOTE(review): unlike updateOutput_frame, this calls THError directly
// from inside the OpenMP parallel region on a bad index — confirm that
// raising there is acceptable for the OpenMP runtime in use.
static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
THIndex_t *ind_p,
int nslices,
int iT,
int iW,
int iH,
int oT,
int oW,
int oH)
{
int k;
// each thread owns a disjoint set of channel slices k; no races
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *gradInput_p_k = gradInput_p + k * iT * iH * iW;
real *gradOutput_p_k = gradOutput_p + k * oT * oH * oW;
THIndex_t *ind_p_k = ind_p + k * iT * iH * iW;
int t, i, j, index;
THIndex_t maxp;
for (t = 0; t < iT; t++)
{
for (i = 0; i < iH; i++)
{
for (j = 0; j < iW; j++)
{
index = t * iH * iW + i * iW + j;
maxp = ind_p_k[index] - TH_INDEX_BASE; /* retrieve position of max */
if (maxp < 0 || maxp >= oT * oH * oW)
{
THError("invalid max index %ld, oT= %d, oW= %d, oH= %d", maxp, oT, oW, oH);
}
gradInput_p_k[index] = gradOutput_p_k[maxp]; /* update gradient */
}
}
}
}
}
// Backward pass: resize gradInput to input's shape, zero it, then gather
// each gradient value from the gradOutput position stored in indices.
// Handles both unbatched (4D) and batched (5D) inputs.
void THNN_(VolumetricMaxUnpooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH)
{
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int iT;
int iH;
int iW;
real *gradInput_data;
real *gradOutput_data;
THIndex_t *indices_data;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, gradOutput, indices,
oT, oW, oH, dT, dW, dH, pT, pW, pH);
// TODO: check gradOutput shape
/* get contiguous gradOutput (may allocate copies; freed below) */
gradOutput = THTensor_(newContiguous)(gradOutput);
indices = THIndexTensor_(newContiguous)(indices);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
// shift dimension positions when a leading batch dimension is present
if (input->dim() == 5)
{
nbatch = input->size(0);
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimt-1);
iT = input->size(dimt);
iH = input->size(dimh);
iW = input->size(dimw);
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
indices_data = THIndexTensor_(data)(indices);
/* backprop */
if (input->dim() == 4)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data, gradOutput_data,
indices_data,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
else
{
int p;
// one frame per batch sample, offset into the flat data arrays
for (p = 0; p < nbatch; p++)
{
THNN_(VolumetricMaxUnpooling_updateGradInput_frame)(
gradInput_data+p*nslices*iT*iW*iH,
gradOutput_data+p*nslices*oT*oW*oH,
indices_data+p*nslices*iT*iW*iH,
nslices,
iT, iW, iH,
oT, oW, oH
);
}
}
/* cleanup: release the contiguous copies */
THTensor_(free)(gradOutput);
THIndexTensor_(free)(indices);
}
#endif
|
bfecc_elemental_limiter_convection.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED )
#define KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "includes/variables.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "processes/node_erase_process.h"
#include "utilities/binbased_fast_point_locator.h"
#include "processes/find_nodal_neighbours_process.h"
#include <boost/timer.hpp>
#include "utilities/timer.h"
#ifdef _OPENMP
#include "omp.h"
#endif
namespace Kratos
{
template<std::size_t TDim> class BFECCLimiterConvection
{
public:
KRATOS_CLASS_POINTER_DEFINITION(BFECCLimiterConvection<TDim>);
// Construct the convection utility around an externally owned spatial
// search structure (bin-based point locator) used to find the element
// containing a convected point.
BFECCLimiterConvection(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure)
: mpSearchStructure(pSearchStructure)
{
}
// Nothing to release: the search structure is held by shared pointer.
~BFECCLimiterConvection()
{
}
//**********************************************************************************************
//**********************************************************************************************
// Convect the scalar variable rVar with the velocity field conv_var over
// one time step DELTA_TIME using a limited BFECC scheme: a backward
// semi-Lagrangian step estimates rVar(n+1), a forward step recovers an
// estimate of rVar(n) to measure the scheme error e1, and a per-element
// forward check computes a second error e2; where |e2| > |e1| the
// correction is limited with minmod before the final backward step.
// substeps controls the substepping of each characteristic trace.
void BFECCconvect(ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps)
{
KRATOS_TRY
const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];
//do movement
Vector N(TDim + 1);
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rModelPart.Nodes().size();
// per-node caches reused by the later loops: the element found by the
// backward trace, its shape functions, and the found/not-found flags
PointerVector< Element > elem_backward( rModelPart.Nodes().size());
std::vector< Vector > Ns( rModelPart.Nodes().size());
std::vector< bool > found( rModelPart.Nodes().size());
std::vector< bool > foundf( rModelPart.Nodes().size());
//FIRST LOOP: estimate rVar(n+1)
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Element::Pointer pelement;
array_1d<double,3> bckPos = iparticle->Coordinates();
const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var);
bool is_found = ConvectBySubstepping(dt,bckPos,vel, N, pelement, result_begin, max_results, -1.0, substeps);
found[i] = is_found;
if(is_found) {
//save position backwards
elem_backward(i) = pelement;
Ns[i] = N;
// interpolate rVar at step n at the foot of the characteristic
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
}
//now obtain the value AT TIME STEP N by taking it from N+1
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Element::Pointer pelement;
array_1d<double,3> fwdPos = iparticle->Coordinates();
const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var,1);
bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, pelement, result_begin, max_results, 1.0, substeps);
foundf[i] = is_found;
if(is_found) {
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) );
}
//Computing error 1 and modified solution at time N to be interpolated again
iparticle->GetValue(BFECC_ERROR_1) = 0.5*iparticle->FastGetSolutionStepValue(rVar,1) - 0.5*phi_old;//computing error1 as e1 = 0.5*(rVar(n) - phi_old)
iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1) + iparticle->GetValue(BFECC_ERROR_1);//rVar(n)+e1
}
}
//Backward with modified solution
#pragma omp parallel for
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = found[i];
if(is_found) {
// reuse the element and shape functions cached by the first loop
Vector N = Ns[i];
Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry();
double phi1 = N[0] * ( geom[0].GetValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].GetValue(rVar) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
// else
// std::cout << "it should find it" << std::endl;
}
// computing error 2 with forward of phi1
// (serial loop: it writes BFECC_ERROR on shared nodes, which would race
// if parallelized as-is)
int nelements = rModelPart.NumberOfElements();
for(int i = 0 ; i < nelements; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::ElementsContainerType::iterator i_element = rModelPart.ElementsBegin() + i;
Element::GeometryType& element_geometry = i_element->GetGeometry();
Element::Pointer pelement;
array_1d<double,3> fwdPos = i_element->GetGeometry().Center();
// element velocity = average of its nodal velocities
array_1d<double,3> vel = ZeroVector(3);
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
for(int k = 0 ; k < 3; k++){
vel[k] += element_geometry[j].GetSolutionStepValue(conv_var)[k] / element_geometry.size();
}
}
bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, pelement, result_begin, max_results, 1.0, substeps);//seeking forwards
// e1 averaged over the element's nodes
double e1 = 0.00f;
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
e1 += element_geometry[j].GetValue(BFECC_ERROR_1);
}
e1 /= element_geometry.size();
double e2 = e1;
if(is_found) {
//Forward with
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi2 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi2 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) );
}
double solution_in_center = 0;
//Computing error2 as e2 = rVar(n)-(phi2+e1)
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
solution_in_center += i_element->GetGeometry()[j].FastGetSolutionStepValue(rVar,1);
}
solution_in_center /= element_geometry.size();
e2 = solution_in_center - (phi2 + e1);
// limit the nodal correction where the second error dominates
if(std::abs(e2) > std::abs(e1)){
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
element_geometry[j].GetValue(BFECC_ERROR) = minmod(e1,element_geometry[j].GetValue(BFECC_ERROR_1));
}
}
}
}
// apply the (possibly limited) correction to the modified solution
#pragma omp parallel for
for (int i = 0; i < nparticles; i++){
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = foundf[i];
if(is_found) {
iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1) + iparticle->GetValue(BFECC_ERROR);
}
}
//Backward with modified solution
#pragma omp parallel for
for (int i = 0; i < nparticles; i++){
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = found[i];
if(is_found) {
Vector N = Ns[i];
Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry();
double phi1 = N[0] * ( geom[0].GetValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].GetValue(rVar) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
}
KRATOS_CATCH("")
}
// Minmod slope limiter: when x and y share the same sign, return the one
// with the smaller magnitude; when the signs differ (or either is zero),
// return 0.
double minmod(double x, double y)
{
    if (x > 0.0 && y > 0.0)
        return std::min(x, y);
    if (x < 0.0 && y < 0.0)
        return std::max(x, y);
    return 0.0;
}
// Trace a point along the time-interpolated VELOCITY field with
// `subdivisions` explicit Euler substeps of size dt/subdivisions.
//
// position          in/out: seed point on entry, final point on exit
//                   (IT WILL BE MODIFIED).
// initial_velocity  velocity used only for the very first substep.
// N, pelement       out: shape functions and element that contain the final
//                   position (meaningful only when the function returns true).
// result_begin/max_results  scratch for the bin-based point locator.
// velocity_sign     > 0 integrates forward in time, otherwise backward.
// subdivisions      substep count (passed as double, used as a counter bound).
//
// Returns true when every substep stayed inside the mesh; stops and returns
// false as soon as the point leaves the mesh.
bool ConvectBySubstepping(
const double dt,
array_1d<double,3>& position, //IT WILL BE MODIFIED
const array_1d<double,3>& initial_velocity,
Vector& N,
Element::Pointer& pelement,
typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin,
const unsigned int max_results,
const double velocity_sign,
const double subdivisions)
{
bool is_found = false;
array_1d<double,3> veulerian;
const double small_dt = dt/subdivisions;
if(velocity_sign > 0.0) //going from the past to the future
{
// first step uses the externally supplied velocity
noalias(position) += small_dt*initial_velocity;
unsigned int substep=0;
while(substep++ < subdivisions)
{
is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
// blend VELOCITY between the old (step n) and new (step n+1) time levels
// proportionally to the fraction of substeps completed
const double new_step_factor = static_cast<double>(substep)/subdivisions;
const double old_step_factor = (1.0 - new_step_factor);
noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
noalias(position) += small_dt*veulerian;
}
else
break; // left the mesh: abandon the trajectory
}
}
else //going from the future to the past
{
noalias(position) -= small_dt*initial_velocity;
unsigned int substep=0;
while(substep++ < subdivisions)
{
is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//this factors get inverted from the other case
const double old_step_factor = static_cast<double>(substep)/subdivisions;
const double new_step_factor = (1.0 - old_step_factor);
noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
noalias(position) -= small_dt*veulerian;
}
else
break; // left the mesh: abandon the trajectory
}
}
return is_found;
}
private:
typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure;
};
} // namespace Kratos.
#endif // KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED defined
|
Layer_Linear.h | /*
* Layers.h
*
* Created by Guido Novati on 29.10.18.
* Copyright 2018 ETH Zurich. All rights reserved.
*
*/
#pragma once
#include "Layers.h"
// Fully connected (dense) layer: output = input * W + bias.
// Geometry is fixed at compile time: nInputs input units -> nOutputs output
// units.  Weights are stored row-major as an [nInputs x nOutputs] matrix.
template <int nOutputs, int nInputs> struct LinearLayer : public Layer {
  // Allocate this layer's parameter storage:
  // nInputs*nOutputs weights plus nOutputs biases.
  Params *allocate_params() const override {
    return new Params(nInputs * nOutputs, nOutputs);
  }
  LinearLayer(const int _ID) : Layer(nOutputs, _ID) {
    printf("(%d) Linear Layer of Input:%d Output:%d\n", ID, nInputs, nOutputs);
    assert(nOutputs > 0 && nInputs > 0);
  }
  // Forward pass for a whole batch: broadcast the bias into the output
  // buffer, then accumulate inputs * W on top with one gemm call.
  void forward(const std::vector<Activation *> &act,
               const std::vector<Params *> &param) const override {
    const int batchSize = act[ID]->batchSize;
    const Real *const inputs =
        act[ID - 1]->output; // size is batchSize * nInputs
    const Real *const weight = param[ID]->weights; // size is nInputs * nOutputs
    const Real *const bias = param[ID]->biases; // size is nOutputs
    Real *const output = act[ID]->output; // size is batchSize * nOutputs
    // reset layers' output with the bias
#pragma omp parallel for collapse(2)
    for (int i = 0; i < batchSize; ++i) {
      for (int j = 0; j < nOutputs; ++j) {
        output[i * nOutputs + j] = bias[j];
      }
    }
    // perform the forward step with gemm (beta = 1 keeps the bias term)
    gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, batchSize, nOutputs,
         nInputs, 1., inputs, nInputs, weight, nOutputs, 1., output, nOutputs);
  }
  // Backward pass: from dError/dOutput of this layer, compute the bias and
  // weight gradients and propagate dError/dOutput to the previous layer.
  void bckward(const std::vector<Activation *> &act,
               const std::vector<Params *> &param,
               const std::vector<Params *> &grad) const override {
    // At this point, act[ID]->dError_dOutput contains the derivative of the
    // error with respect to the outputs of this layer.
    const Real *const deltas = act[ID]->dError_dOutput;
    const Real *const inputs = act[ID - 1]->output;
    const Real *const weight = param[ID]->weights;
    const int batchSize = act[ID]->batchSize;
    // BackProp to compute bias gradient: dError / dBias = sum of deltas
    // over the batch.  The atomic guards concurrent updates to grad_B[i]
    // because collapse(2) distributes both loops across threads.
    {
      Real *const grad_B = grad[ID]->biases; // size nOutputs
      std::fill(grad_B, grad_B + nOutputs, 0.);
#pragma omp parallel for collapse(2)
      for (int b = 0; b < batchSize; ++b) {
        for (int i = 0; i < nOutputs; ++i) {
#pragma omp atomic
          grad_B[i] += deltas[b * nOutputs + i];
        }
      }
    }
    // BackProp to compute weight gradient: dError / dWeights = inputs^T * deltas
    {
      Real *const grad_W = grad[ID]->weights; // size nInputs * nOutputs
      std::fill(grad_W, grad_W + nInputs * nOutputs, 0.);
      gemm(CblasRowMajor, CblasTrans, CblasNoTrans, nInputs, nOutputs,
           batchSize, 1., inputs, nInputs, deltas, nOutputs, 0., grad_W,
           nOutputs);
    }
    // BackProp to compute dEdO of prev layer: deltas * W^T
    {
      Real *const errinp = act[ID - 1]->dError_dOutput; // batchSize * nInputs
      std::fill(errinp, errinp + batchSize * nInputs, 0.);
      gemm(CblasRowMajor, CblasNoTrans, CblasTrans, batchSize, nInputs,
           nOutputs, 1., deltas, nOutputs, weight, nOutputs, 0., errinp,
           nInputs);
    }
  }
  // Xavier/Glorot uniform initialisation in [-sqrt(6/(nInputs+size)), +...].
  // NOTE(review): `size` is assumed to be the Layer base's output count
  // (== nOutputs here) -- TODO confirm against Layers.h.
  void init(std::mt19937 &gen,
            const std::vector<Params *> &param) const override {
    assert(param[ID] not_eq nullptr);
    // get pointers to layer's weights and bias
    Real *const W = param[ID]->weights, *const B = param[ID]->biases;
    assert(param[ID]->nWeights == nInputs * size && param[ID]->nBiases == size);
    // initialize weights with Xavier initialization
    const Real scale = std::sqrt(6.0 / (nInputs + size));
    std::uniform_real_distribution<Real> dis(-scale, scale);
    std::generate(W, W + nInputs * nOutputs, [&]() { return dis(gen); });
    std::generate(B, B + nOutputs, [&]() { return dis(gen); });
  }
};
|
sparselu.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
#include "my_include.h"
/***********************************************************************
* checkmat:
**********************************************************************/
//double tmp[bots_arg_size][bots_arg_size][bots_arg_size_1*bots_arg_size_1];
/* checkmat: compare two bots_arg_size_1 x bots_arg_size_1 dense blocks.
 * Returns TRUE when every entry of N matches the corresponding entry of M
 * to within relative error EPSILON; on the first mismatch prints a
 * diagnostic and returns FALSE.
 * FIX: the relative error is now normalised by |M[i][j]| (fabs).  The
 * original divided by the signed reference entry, so for negative entries
 * r_err went negative and the `r_err > EPSILON` test could never fire,
 * silently hiding mismatches in those positions. */
int checkmat (double *M, double *N)
{
        int i, j;
        double r_err;
        for (i = 0; i < bots_arg_size_1; i++)
        {
                for (j = 0; j < bots_arg_size_1; j++)
                {
                        r_err = fabs(M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]);
                        r_err = r_err / fabs(M[i*bots_arg_size_1+j]);
                        if (r_err > EPSILON)
                        {
                                printf("Checking failure: A[%d][%d]=%20.12E B[%d][%d]=%20.12E; Relative Error=%20.12E\n",
                                        i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
                                bots_message("Checking failure: A[%d][%d]=%lf B[%d][%d]=%lf; Relative Error=%lf\n",
                                        i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
                                return FALSE;
                        }
                }
        }
        return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/* genmat: build the deterministic sparse block structure in M and fill the
 * non-null blocks with pseudo-random data.  Blocks are carved out of the
 * global scratch arena `tmp` via the bump cursor `tmp_pos` (sequential
 * phase, so no synchronisation is needed here).  Diagonal blocks are made
 * diagonally dominant so the LU factorisation is well conditioned.
 * FIX: pointer arithmetic on void* is a GNU extension, not standard C;
 * the arena offset is now computed on a char* base. */
void genmat (double *M[])
{
        int null_entry, init_val, i, j, ii, jj;
        double *p;
        double *prow;
        double rowsum;
        init_val = 1325;
        /* generating the structure */
        for (ii=0; ii < bots_arg_size; ii++)
        {
                for (jj=0; jj < bots_arg_size; jj++)
                {
                        /* computing null entries: keep the diagonal and its two
                         * neighbours, drop a deterministic pattern elsewhere */
                        null_entry=FALSE;
                        if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
                        if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
                        if (ii%2==1) null_entry = TRUE;
                        if (jj%2==1) null_entry = TRUE;
                        if (ii==jj) null_entry = FALSE;
                        if (ii==jj-1) null_entry = FALSE;
                        if (ii-1 == jj) null_entry = FALSE;
                        /* allocating matrix */
                        if (null_entry == FALSE){
                                /* FIX: char* base instead of void* arithmetic */
                                M[ii*bots_arg_size+jj] = (double*)((char*)tmp + tmp_pos);
                                tmp_pos += bots_arg_size_1*bots_arg_size_1*sizeof(double);
                                if ((M[ii*bots_arg_size+jj] == NULL))
                                {
                                        bots_message("Error: Out of memory\n");
                                        exit(101);
                                }
                                /* initializing matrix */
                                /* Modify diagonal element of each row in order */
                                /* to ensure matrix is diagonally dominant and  */
                                /* well conditioned.                            */
                                prow = p = M[ii*bots_arg_size+jj];
                                for (i = 0; i < bots_arg_size_1; i++)
                                {
                                        rowsum = 0.0;
                                        for (j = 0; j < bots_arg_size_1; j++)
                                        {
                                                init_val = (3125 * init_val) % 65536;
                                                (*p) = (double)((init_val - 32768.0) / 16384.0);
                                                /* NOTE(review): abs() is the *integer* absolute value,
                                                 * so the double operand is truncated here.  This looks
                                                 * like it should be fabs(), but vgenmat() uses the same
                                                 * expression and the self-check compares the two
                                                 * pipelines, so both must change together -- TODO. */
                                                rowsum += abs(*p);
                                                p++;
                                        }
                                        if (ii == jj)
                                                *(prow+i) = rowsum * (double) bots_arg_size + abs(*(prow+i));
                                        prow += bots_arg_size_1;
                                }
                        }
                        else
                        {
                                M[ii*bots_arg_size+jj] = NULL;
                        }
                }
        }
}
/***********************************************************************
* print_structure:
**********************************************************************/
/* print_structure: dump the block sparsity pattern of M --
 * 'x' marks an allocated block, ' ' a null one. */
void print_structure(char *name, double *M[])
{
        int row, col;
        bots_message("Structure for matrix %s @ 0x%p\n",name, M);
        for (row = 0; row < bots_arg_size; row++) {
                for (col = 0; col < bots_arg_size; col++) {
                        if (M[row*bots_arg_size+col] != NULL)
                                bots_message("x");
                        else
                                bots_message(" ");
                }
                bots_message("\n");
        }
        bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
double * allocate_clean_block()
{
int i,j;
double *p, *q;
//printf("1\n");
// p = (double *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(double));
p = (double*)((void*)tmp + tmp_pos);
tmp_pos += bots_arg_size_1*bots_arg_size_1*sizeof(double);
q=p;
if (p!=NULL){
for (i = 0; i < bots_arg_size_1; i++)
for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;}
}
else
{
bots_message("Error: Out of memory\n");
exit (101);
}
return (q);
}
/***********************************************************************
* lu0:
**********************************************************************/
/* lu0: in-place LU factorisation (no pivoting) of one diagonal block.
 * After return, the strictly lower part of `diag` holds L's multipliers and
 * the upper part (including the diagonal) holds U. */
void lu0(double *diag)
{
        int i, j, k;
        for (k = 0; k < bots_arg_size_1; k++) {
                for (i = k + 1; i < bots_arg_size_1; i++)
                {
                        const double mult = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
                        diag[i*bots_arg_size_1+k] = mult;
                        for (j = k + 1; j < bots_arg_size_1; j++)
                                diag[i*bots_arg_size_1+j] -= mult * diag[k*bots_arg_size_1+j];
                }
                /* progress marker consumed by the crash-consistency harness */
                flag1 = k;
        }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/* bdiv: block forward substitution -- solve row := row * U^{-1} against the
 * factorised diagonal block `diag`, updating `row` in place. */
void bdiv(double *diag, double *row)
{
        int r, c, k;
        for (r = 0; r < bots_arg_size_1; r++) {
                for (k = 0; k < bots_arg_size_1; k++)
                {
                        const double factor = row[r*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
                        row[r*bots_arg_size_1+k] = factor;
                        for (c = k + 1; c < bots_arg_size_1; c++)
                                row[r*bots_arg_size_1+c] -= factor * diag[k*bots_arg_size_1+c];
                }
        }
}
/***********************************************************************
* bmod:
**********************************************************************/
/* bmod: Schur-complement update inner -= row * col for one block triple.
 * Stores go straight to `inner` on every k iteration (no register
 * accumulator) so the memory state visible to the crash-consistency
 * harness advances exactly one multiply-subtract at a time, matching the
 * per-element progress flags below. */
void bmod(double *row, double *col, double *inner)
{
        int i, j, k;
        for (i = 0; i < bots_arg_size_1; i++){
                double *row_i   = row   + i*bots_arg_size_1;
                double *inner_i = inner + i*bots_arg_size_1;
                for (j = 0; j < bots_arg_size_1; j++){
                        for (k = 0; k < bots_arg_size_1; k++){
                                inner_i[j] -= row_i[k] * col[k*bots_arg_size_1+j];
                                flag9 = k;   /* crash-harness progress marker */
                        }
                        flag8 = j;
                }
                flag7 = i;
        }
}
/***********************************************************************
* bmod:
**********************************************************************/
/* vbmod: verification-phase Schur update inner -= row * col.
 * Same arithmetic as bmod, without the crash-progress flags. */
void vbmod(double *row, double *col, double *inner)
{
        int r, c, k;
        for (r = 0; r < bots_arg_size_1; r++)
        {
                double *row_r   = row   + r*bots_arg_size_1;
                double *inner_r = inner + r*bots_arg_size_1;
                for (c = 0; c < bots_arg_size_1; c++)
                        for (k = 0; k < bots_arg_size_1; k++)
                                inner_r[c] -= row_r[k] * col[k*bots_arg_size_1+c];
        }
}
/***********************************************************************
* fwd:
**********************************************************************/
/* fwd: block forward elimination -- apply the L factor stored in the
 * factorised diagonal block `diag` to the block `col`, in place. */
void fwd(double *diag, double *col)
{
        int i, j, k;
        for (j = 0; j < bots_arg_size_1; j++)
                for (k = 0; k < bots_arg_size_1; k++)
                {
                        /* col[k][j] is final once i > k, so it can be hoisted */
                        const double ckj = col[k*bots_arg_size_1+j];
                        for (i = k + 1; i < bots_arg_size_1; i++)
                                col[i*bots_arg_size_1+j] -= diag[i*bots_arg_size_1+k] * ckj;
                }
}
/* sparselu_init: allocate the block-pointer table and the flat scratch
 * arena `tmp`, generate the matrix, and register state with the
 * crash-consistency runtime.  The arena is sized for a fully dense block
 * grid, so bump allocation of the initial sparse blocks plus any later
 * fill-in cannot overrun it (total blocks <= bots_arg_size^2).
 * NOTE(review): the semantics of crucial_data/consistent_data come from
 * my_include.h (not visible here) -- presumed to mark persistent vs.
 * recoverable state for the crash harness; confirm there. */
void sparselu_init (double ***pBENCH, char *pass)
{
tmp_pos = 0;
*pBENCH = (double **) malloc(bots_arg_size*bots_arg_size*sizeof(double *));
tmp = malloc((bots_arg_size*bots_arg_size)*bots_arg_size_1*bots_arg_size_1*sizeof(double));
genmat(*pBENCH);
/* register the whole arena as crash-crucial data */
crucial_data(tmp, "double", (bots_arg_size*bots_arg_size*bots_arg_size_1*bots_arg_size_1));
/* register the progress flags written by lu0/bmod and the driver */
consistent_data(&flag1, "int", 1);
consistent_data(&flag2, "int", 1);
consistent_data(&flag3, "int", 1);
consistent_data(&flag4, "int", 1);
consistent_data(&flag5, "int", 1);
consistent_data(&flag6, "int", 1);
consistent_data(&flag7, "int", 1);
consistent_data(&flag8, "int", 1);
consistent_data(&flag9, "int", 1);
/* spec print_structure(pass, *pBENCH); */
}
/* sparselu_par_call: OpenMP-task-parallel sparse LU factorisation of the
 * blocked matrix BENCH.  A single thread drives the outer kk loop and
 * spawns one task per non-null block update; taskwait barriers separate
 * the panel updates from the trailing-submatrix updates.  The flagN
 * globals are progress markers consumed by the crash-consistency harness
 * bracketed by start_crash()/end_crash(). */
void sparselu_par_call(double **BENCH)
{
int ii, jj, kk;
bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
/* crash-injection window starts here */
flush_whole_cache();
start_crash();
#pragma omp parallel
#pragma omp single nowait
for (kk=0; kk<bots_arg_size; kk++)
{
/* factorise the diagonal block in place (runs on the single thread) */
lu0(BENCH[kk*bots_arg_size+kk]);
/* pivot-row updates: one task per non-null block of row kk */
for (jj=kk+1; jj<bots_arg_size; jj++) {
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
{
fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
}
/* NOTE(review): progress flag is written while tasks may still be
 * running; recovery presumably tolerates this -- TODO confirm */
flag2 = jj;
}
/* pivot-column updates: one task per non-null block of column kk */
for (ii=kk+1; ii<bots_arg_size; ii++) {
if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
{
bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
}
flag3 = ii;
}
#pragma omp taskwait
/* trailing submatrix update; fill-in blocks are allocated lazily */
for (ii=kk+1; ii<bots_arg_size; ii++) {
if (BENCH[ii*bots_arg_size+kk] != NULL)
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
{
if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
flag6 = jj;
}
flag4 = ii;
/* (disabled) debug checkpoint dump of the whole arena at kk==0, ii==47 */
}
#pragma omp taskwait
flag5 = kk;
}
/* crash-injection window ends */
end_crash();
bots_message(" completed!\n");
}
/* sparselu_seq_call: reference sequential sparse LU factorisation over the
 * blocked matrix BENCH (bots_arg_size x bots_arg_size grid of
 * bots_arg_size_1 x bots_arg_size_1 dense blocks). */
void sparselu_seq_call(double **BENCH)
{
        int row, col, diag;
        for (diag = 0; diag < bots_arg_size; diag++)
        {
                /* factorise the diagonal block */
                lu0(BENCH[diag*bots_arg_size+diag]);
                /* update the remainder of the pivot row */
                for (col = diag + 1; col < bots_arg_size; col++)
                {
                        if (BENCH[diag*bots_arg_size+col] != NULL)
                                fwd(BENCH[diag*bots_arg_size+diag], BENCH[diag*bots_arg_size+col]);
                }
                /* update the remainder of the pivot column */
                for (row = diag + 1; row < bots_arg_size; row++)
                {
                        if (BENCH[row*bots_arg_size+diag] != NULL)
                                bdiv (BENCH[diag*bots_arg_size+diag], BENCH[row*bots_arg_size+diag]);
                }
                /* trailing submatrix update with lazy fill-in allocation */
                for (row = diag + 1; row < bots_arg_size; row++)
                {
                        if (BENCH[row*bots_arg_size+diag] == NULL)
                                continue;
                        for (col = diag + 1; col < bots_arg_size; col++)
                        {
                                if (BENCH[diag*bots_arg_size+col] == NULL)
                                        continue;
                                if (BENCH[row*bots_arg_size+col] == NULL)
                                        BENCH[row*bots_arg_size+col] = allocate_clean_block();
                                bmod(BENCH[row*bots_arg_size+diag], BENCH[diag*bots_arg_size+col], BENCH[row*bots_arg_size+col]);
                        }
                }
        }
}
/* sparselu_fini: teardown hook.  Structure printing is disabled for SPEC
 * runs, so there is nothing to do; parameters are kept for the interface. */
void sparselu_fini (double **BENCH, char *pass)
{
        (void) BENCH;
        (void) pass;
        /* spec print_structure(pass, BENCH); */
}
/*
* changes for SPEC, original source
*
int sparselu_check(double **SEQ, double **BENCH)
{
int ii,jj,ok=1;
for (ii=0; ((ii<bots_arg_size) && ok); ii++)
{
for (jj=0; ((jj<bots_arg_size) && ok); jj++)
{
if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL))
ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
}
if (ok) return BOTS_RESULT_SUCCESSFUL;
else return BOTS_RESULT_UNSUCCESSFUL;
}
*/
/*
* SPEC modified check, print out values
*
*/
/*int sparselu_check(double **SEQ, double **BENCH)
{
int i, j, ok;
bots_message("Output size: %d\n",bots_arg_size);
for (i = 0; i < bots_arg_size; i+=50)
{
for (j = 0; j < bots_arg_size; j+=40)
{
ok = checkmat1(BENCH[i*bots_arg_size+j]);
}
}
return BOTS_RESULT_SUCCESSFUL;
}
int checkmat1 (double *N)
{
int i, j;
for (i = 0; i < bots_arg_size_1; i+=20)
{
for (j = 0; j < bots_arg_size_1; j+=20)
{
bots_message("Output Matrix: A[%d][%d]=%8.12f \n",
i,j, N[i*bots_arg_size_1+j]);
}
}
//printf("111111111\n");
return TRUE;
}*/
/* vgenmat: verification-phase twin of genmat -- identical deterministic
 * structure and values, but each block comes from plain malloc instead of
 * the crash-instrumented bump arena, so the reference factorisation is
 * independent of the crash harness. */
void vgenmat (double *M[])
{
int null_entry, init_val, i, j, ii, jj;
double *p;
double *prow;
double rowsum;
init_val = 1325;
/* generating the structure */
for (ii=0; ii < bots_arg_size; ii++)
{
for (jj=0; jj < bots_arg_size; jj++)
{
/* computing null entries (same pattern as genmat) */
null_entry=FALSE;
if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
if (ii%2==1) null_entry = TRUE;
if (jj%2==1) null_entry = TRUE;
if (ii==jj) null_entry = FALSE;
if (ii==jj-1) null_entry = FALSE;
if (ii-1 == jj) null_entry = FALSE;
/* allocating matrix */
if (null_entry == FALSE){
M[ii*bots_arg_size+jj] = (double *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(double));
if ((M[ii*bots_arg_size+jj] == NULL))
{
bots_message("Error: Out of memory\n");
exit(101);
}
/* initializing matrix */
/* Modify diagonal element of each row in order */
/* to ensure matrix is diagonally dominant and  */
/* well conditioned.                            */
prow = p = M[ii*bots_arg_size+jj];
for (i = 0; i < bots_arg_size_1; i++)
{
rowsum = 0.0;
for (j = 0; j < bots_arg_size_1; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (double)((init_val - 32768.0) / 16384.0);
/* NOTE(review): integer abs() truncates the double operand; this
 * mirrors genmat on purpose -- the self-check requires both
 * generators to change together if this is ever fixed to fabs() */
rowsum += abs(*p);
p++;
}
if (ii == jj)
*(prow+i) = rowsum * (double) bots_arg_size + abs(*(prow+i));
prow += bots_arg_size_1;
}
}
else
{
M[ii*bots_arg_size+jj] = NULL;
}
}
}
}
/* vlu0: verification-phase LU factorisation of one diagonal block.
 * Same arithmetic as lu0, without the crash-progress flag. */
void vlu0(double *diag)
{
        int i, j, k;
        for (k = 0; k < bots_arg_size_1; k++)
        {
                for (i = k + 1; i < bots_arg_size_1; i++)
                {
                        const double mult = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
                        diag[i*bots_arg_size_1+k] = mult;
                        for (j = k + 1; j < bots_arg_size_1; j++)
                                diag[i*bots_arg_size_1+j] -= mult * diag[k*bots_arg_size_1+j];
                }
        }
}
/* vsparselu_init: allocate and fill the verification copy of the blocked
 * matrix using plain malloc (no bump arena, no crash bookkeeping). */
void vsparselu_init (double ***pBENCH, char *pass)
{
        (void) pass; /* structure printing disabled for SPEC */
        *pBENCH = (double **) malloc(bots_arg_size*bots_arg_size*sizeof(double *));
        vgenmat(*pBENCH);
}
/* vsparselu_par_call: task-parallel LU factorisation of the verification
 * matrix -- same task structure as sparselu_par_call but with the
 * flag-free kernels (vlu0/vbmod) and no crash instrumentation.
 * NOTE(review): fill-in still goes through allocate_clean_block, i.e. the
 * shared bump arena, even though vgenmat used malloc -- the arena must
 * still have room for these blocks; TODO confirm sizing. */
void vsparselu_par_call(double **BENCH)
{
int ii, jj, kk;
bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel
#pragma omp single nowait
for (kk=0; kk<bots_arg_size; kk++)
{
/* factorise the diagonal block */
vlu0(BENCH[kk*bots_arg_size+kk]);
/* pivot-row update tasks */
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
{
fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
}
/* pivot-column update tasks */
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
{
bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
}
#pragma omp taskwait
/* trailing submatrix update with lazy fill-in allocation */
for (ii=kk+1; ii<bots_arg_size; ii++)
if (BENCH[ii*bots_arg_size+kk] != NULL)
for (jj=kk+1; jj<bots_arg_size; jj++)
if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
{
if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
vbmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
#pragma omp taskwait
}
bots_message("pre verify completed!\n");
}
/* sparselu_check: regenerate the matrix with plain malloc, refactorise it
 * with the flag-free verification kernels, and compare every block of that
 * reference against BENCH via checkmat.
 * NOTE(review): the incoming SEQ argument is overwritten locally by
 * vsparselu_init -- whatever the caller passed in is ignored (and not
 * freed); the caller's own pointer is unchanged since SEQ is by value. */
int sparselu_check(double **SEQ, double **BENCH)
{
vsparselu_init(&SEQ, NULL);
vsparselu_par_call(SEQ);
int ii,jj,ok=1;
for (ii=0; ((ii<bots_arg_size) && ok); ii++)
{
for (jj=0; ((jj<bots_arg_size) && ok); jj++)
{
/* structures must agree: a block is null in both matrices or in neither */
if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE;
if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL))
ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
}
}
if (ok) return BOTS_RESULT_SUCCESSFUL;
else return BOTS_RESULT_UNSUCCESSFUL;
/* (disabled) SPEC sampling variant that printed block entries instead of
 * comparing against a reference */
}
/* checkmat1: print a sparse sample (every 20th entry in each dimension) of
 * one block, used by the SPEC-modified output check.  Always returns TRUE. */
int checkmat1 (double *N)
{
        int i, j;
        for (i = 0; i < bots_arg_size_1; i += 20)
                for (j = 0; j < bots_arg_size_1; j += 20)
                        bots_message("Output Matrix: A[%d][%d]=%8.12f \n",
                                i,j, N[i*bots_arg_size_1+j]);
        return TRUE;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is used as scratch space and may be modified by the call.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalise y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly non-negative in the difference. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative difference iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point variable-coefficient stencil
 * (PLUTO/Pochoir derived; the tiled loop nest is CLooG-generated).
 * Usage: prog Nx Ny Nz Nt
 * Fixes vs. original:
 *  - Nx/Ny/Nz/Nt were used uninitialised when too few arguments were given
 *    (undefined behavior); missing arguments now exit with a usage message.
 *  - grid initialisation started at index 1, leaving the index-0 boundary
 *    planes of A and coef uninitialised even though the stencil reads them;
 *    initialisation now starts at 0, and A[1] is seeded as a copy of A[0]
 *    so its boundary planes are defined too.
 *  - the outer A/coef containers and tile_size were leaked; now freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2 for the boundary planes */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
  // allocate the arrays: A is double-buffered in time, coef holds the
  // seven per-point stencil coefficient grids
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables -- index 0 included (FIX) so the stencil never
  // reads uninitialised boundary planes
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  // FIX: seed the second time buffer too; its boundary planes are read at
  // odd time steps but never written by the stencil
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // tiled time-space loop nest (CLooG-generated; do not hand-edit)
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(8*t3+Nx+4,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (including the outer containers -- FIX)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
threaded.c | #include<stdlib.h>
#include<math.h>
#include<stdio.h>
/* Foo: heap-allocator stress loop.  Each of 0xff iterations allocates four
 * int buffers, grows one of them with realloc, zeroes every element of all
 * four, and frees everything.
 * FIX: malloc/calloc/realloc results are now checked before use --
 * dereferencing a NULL return is undefined behavior -- and the realloc
 * result no longer aliases the original pointer on failure. */
void Foo(){
    int j = 0;
    while (j++ < 0xff) {
        int i;
        int sz = 0xffff;
        int * a = malloc(sz * sizeof *a);
        int * b = calloc(sz, sizeof *b);   /* zero-initialised */
        int * c = malloc(sz * sizeof *c);
        int * d = malloc(sz * sizeof *d);
        int * e = NULL;
        if (a && b && c && d) {
            e = realloc(a, (sz + 100) * sizeof *e);
            if (e != NULL)
                a = NULL;  /* storage now owned by e; must not free a */
        }
        /* touch every element only when all buffers are live */
        if (b && c && d && e) {
            for (i = 0; i < sz; i++)
                d[i] = c[i] = b[i] = e[i] = 0;
        }
        free(a);  /* non-NULL only if realloc failed */
        free(e);
        free(b);
        free(c);
        free(d);
    }
}
/* Entry point: announce startup on stderr, then run the allocator
 * stress test Foo() once per OpenMP thread. */
int main(){
    /* the message has no format specifiers, so fputs emits the same bytes
     * as the original fprintf(stderr, ...) */
    fputs("\n APP STARTED \n", stderr);
#pragma omp parallel
    {
        Foo();
    }
    return 0;
}
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#include "np_helper/np_helper.h"
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
 * Schwarz-type prescreening for the (LL|LL) spinor integral block.
 * Returns nonzero when the shell quartet (i,j|k,l) may contribute: the
 * integral upper bound q_ij * q_kl must exceed direct_scf_cutoff AND at
 * least one of the density-matrix shell-pair bounds it multiplies must be
 * larger than cutoff / (q_ij*q_kl).  With no optimizer attached
 * (opt == NULL) nothing is screened out.
 */
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int ish = shls[0];
        int jsh = shls[1];
        int ksh = shls[2];
        int lsh = shls[3];
        int nbas = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nbas);
        assert(jsh < nbas);
        assert(ksh < nbas);
        assert(lsh < nbas);
        double qijkl = opt->q_cond[ish*nbas+jsh] * opt->q_cond[ksh*nbas+lsh];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        double dmin = opt->direct_scf_cutoff / qijkl;
        double *dm = opt->dm_cond;
        /* same six J/K density contractions the original tested */
        return (dm[jsh*nbas+ish] > dmin)
            || (dm[lsh*nbas+ksh] > dmin)
            || (dm[jsh*nbas+ksh] > dmin)
            || (dm[jsh*nbas+lsh] > dmin)
            || (dm[ish*nbas+ksh] > dmin)
            || (dm[ish*nbas+lsh] > dmin);
}
/*
 * Per-quartet K-screening setup for the LLLL case: publish, for each
 * density matrix, the shell-pair max-element table to use for vj and vk,
 * and report the smallest density value that can still contribute
 * (*dm_atleast = cutoff / q_ij*q_kl).  Always returns 1 (never skips).
 */
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int nbas = opt->nbas;
        double qijkl = opt->q_cond[shls[0]*nbas+shls[1]]
                     * opt->q_cond[shls[2]*nbas+shls[3]];
        /* per-DM tables are stored right after the global dm_cond block */
        double *pdmscond = opt->dm_cond + nbas*nbas;
        int idm;
        for (idm = 0; idm < (n_dm+1)/2; idm++) {
                double *cond = pdmscond + idm*nbas*nbas;
                // note in _vhf.rdirect_mapdm, J and K share the same DM
                dms_cond[idm*2+0] = cond; // for vj
                dms_cond[idm*2+1] = cond; // for vk
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/*
 * Schwarz-type prescreening for the (SS|LL) spinor integral block.
 * The bra pair (i,j) is bounded with the SS q_cond table, the ket pair
 * (k,l) with the LL table; the density bounds come from the SS, LL and
 * SL blocks of dm_cond respectively.  Returns nonzero when the quartet
 * may contribute; with opt == NULL nothing is screened out.
 */
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int ish = shls[0];
        int jsh = shls[1];
        int ksh = shls[2];
        int lsh = shls[3];
        int nbas = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(ish < nbas);
        assert(jsh < nbas);
        assert(ksh < nbas);
        assert(lsh < nbas);
        double qijkl = opt->q_cond[nbas*nbas*SS+ish*nbas+jsh]
                     * opt->q_cond[ksh*nbas+lsh];
        if (qijkl <= opt->direct_scf_cutoff) {
                return 0;
        }
        double dmin = opt->direct_scf_cutoff / qijkl;
        double *dmll = opt->dm_cond;                    /* LL block at offset 0 */
        double *dmss = opt->dm_cond + nbas*nbas*SS;
        double *dmsl = opt->dm_cond + nbas*nbas*SL;
        return (dmss[jsh*nbas+ish] > dmin)
            || (dmll[lsh*nbas+ksh] > dmin)
            || (dmsl[jsh*nbas+ksh] > dmin)
            || (dmsl[jsh*nbas+lsh] > dmin)
            || (dmsl[ish*nbas+ksh] > dmin)
            || (dmsl[ish*nbas+lsh] > dmin);
}
// be careful with the order in dms_cond, the current order (dmll, dmss, dmsl)
// is consistent to the function _call_veff_ssll in dhf.py
/*
 * K-screening setup for the SSLL case: hand out the per-DM shell-pair
 * max tables in the order (dmll..., dmss..., dmsl...) — the order
 * expected by _call_veff_ssll in dhf.py — and report the smallest
 * density magnitude that can still contribute.  Always returns 1.
 */
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
                         double **dms_cond, int n_dm, double *dm_atleast,
                         int *atm, int *bas, double *env)
{
        int nbas = opt->nbas;
        int nn = nbas*nbas;
        double qijkl = opt->q_cond[nn*SS+shls[0]*nbas+shls[1]]
                     * opt->q_cond[shls[2]*nbas+shls[3]];
        /* per-DM tables start after the 4 global dm_cond blocks */
        double *pdmscond = opt->dm_cond + 4*nn;
        int nset = (n_dm+2) / 3;
        double *ll = pdmscond + nset*nn*LL;
        double *ss = pdmscond + nset*nn*SS;
        double *sl = pdmscond + nset*nn*SL;
        int idm;
        for (idm = 0; idm < nset; idm++) {
                dms_cond[nset*0+idm] = ll + idm*nn;
                dms_cond[nset*1+idm] = ss + idm*nn;
                dms_cond[nset*2+idm] = sl + idm*nn;
        }
        *dm_atleast = opt->direct_scf_cutoff / qijkl;
        return 1;
}
/*
 * Fill the symmetric nbas x nbas Schwarz table:
 *   qcond[i,j] = sqrt(max_component |(ij|ij)|)
 * computed with the supplied spinor integral routine.  Values are floored
 * at 1e-100 so later divisions by q_cond are safe.  The shell-pair loop is
 * parallelized over the lower triangle with OpenMP.
 */
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
                      int *ao_loc, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        double qtmp, tmp;
        int i, j, ij, di, dj, ish, jsh;
        int shls[4];
        /* per-thread scratch; NOTE(review): malloc results are not checked */
        double *cache = malloc(sizeof(double) * cache_size);
        di = 0;
        /* di = largest shell dimension, used to size the integral buffer */
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* unravel triangular index ij -> (ish, jsh) with ish >= jsh */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                /* evaluate the "diagonal" quartet (ij|ij) */
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                qtmp = 1e-100;
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                /* diagonal element (i,j|i,j) of the di*dj x di*dj block */
                                tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                qcond[ish*nbas+jsh] = qtmp;
                qcond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/*
 * (Re)build opt->q_cond with LLLL Schwarz bounds.  Only the plain spinor
 * ERI routine int2e_spinor is supported (enforced by the assert).
 */
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/*
 * (Re)build opt->q_cond with SSSS Schwarz bounds.  Only the
 * int2e_spsp1spsp2_spinor routine is supported (enforced by the assert).
 */
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas);
        assert(intor == &int2e_spsp1spsp2_spinor);
        set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
/*
 * (Re)build opt->q_cond for SSLL screening: two stacked nbas x nbas
 * tables — [0, n^2) holds the LL bounds, [n^2, 2n^2) the SS bounds.
 * The intor argument is ignored; both integral routines are fixed.
 */
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                            int *ao_loc, int *atm, int natm,
                            int *bas, int nbas, double *env)
{
        free(opt->q_cond);   /* free(NULL) is a no-op, so no guard needed */
        opt->q_cond = malloc(sizeof(double) * nbas*nbas*2);
        set_qcond(&int2e_spinor, NULL, opt->q_cond,
                  ao_loc, atm, natm, bas, nbas, env);
        set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas,
                  ao_loc, atm, natm, bas, nbas, env);
}
/*
 * Condense nset complex density matrices to shell-pair maxima.
 * For each shell pair (ish,jsh) and each matrix, dmscond receives the
 * largest symmetrized magnitude .5*(|D_ij| + |D_ji|) over the pair's AO
 * block; dmcond receives the maximum across all sets.  Both outputs are
 * symmetric nbas x nbas tables.  direct_scf_cutoff, atm, natm, bas and
 * env are unused but kept for a uniform callback signature.
 */
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
                       double direct_scf_cutoff, int nset, int *ao_loc,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const size_t nao = ao_loc[nbas];
        int ish, jsh, iset, i, j;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh <= ish; jsh++) {
                double dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        double complex *pdm = dm + nao*nao*iset;
                        double blkmax = 0;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                double v = .5 * (cabs(pdm[i*nao+j])
                                               + cabs(pdm[j*nao+i]));
                                if (v > blkmax) {
                                        blkmax = v;
                                }
                        } }
                        dmscond[iset*nbas*nbas+ish*nbas+jsh] = blkmax;
                        dmscond[iset*nbas*nbas+jsh*nbas+ish] = blkmax;
                        if (blkmax > dmax) {
                                dmax = blkmax;
                        }
                }
                dmcond[ish*nbas+jsh] = dmax;
                dmcond[jsh*nbas+ish] = dmax;
        } }
}
// dm_cond ~ 1+nset, dm_cond + dms_cond
/*
 * Rebuild opt->dm_cond for LLLL prescreening: one nbas x nbas table of
 * overall shell-pair maxima, followed by nset per-matrix tables.  The
 * buffer is reallocated on every call because nset may differ between
 * calls.
 */
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        free(opt->dm_cond);   /* free(NULL) is a no-op */
        opt->dm_cond = malloc(sizeof(double)*nbas*nbas*(1+nset));
        NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*(1+nset));
        /* global dmcond first, then the per-DM dmscond tables */
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
/*
 * Rebuild opt->dm_cond for SSSS prescreening; same layout as the LLLL
 * variant: one global shell-pair-max table followed by nset per-DM tables.
 */
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        free(opt->dm_cond);   /* free(NULL) is a no-op */
        opt->dm_cond = malloc(sizeof(double)*nbas*nbas*(1+nset));
        NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*(1+nset));
        set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent to the
// function _call_veff_ssll in dhf.py
/*
 * Rebuild opt->dm_cond for SSLL prescreening.  The input dm must carry
 * (at least) three groups of matrices in the order dmll, dmss, dmsl;
 * nset is the total count and is divided by 3 to get matrices per group.
 * Layout of opt->dm_cond: 4 global nbas x nbas blocks (LL, SS, SL; the
 * LS slot is allocated but currently unused), followed by 4 groups of
 * nset per-DM tables in the same block order.
 */
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
                               int *ao_loc, int *atm, int natm,
                               int *bas, int nbas, double *env)
{
        if (opt->dm_cond) {
                free(opt->dm_cond);
        }
        if (nset < 3) {
                fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
                        "required to set rkb prescreening\n");
                exit(1);
        }
        nset = nset / 3;   /* matrices per spin block */
        opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
        NPdset0(opt->dm_cond, ((size_t)nbas)*nbas*4*(1+nset));
        // 4 types of dmcond (LL,SS,SL; LS allocated but unused) followed by 4 types of dmscond
        int n2c = CINTtot_cgto_spinor(bas, nbas);
        double *dmcondll = opt->dm_cond + nbas*nbas*LL;
        double *dmcondss = opt->dm_cond + nbas*nbas*SS;
        double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
        //double *dmcondls = opt->dm_cond + nbas*nbas*LS;
        double *pdmscond = opt->dm_cond + nbas*nbas*4;
        double *dmscondll = pdmscond + nset*nbas*nbas*LL;
        double *dmscondss = pdmscond + nset*nbas*nbas*SS;
        double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
        //double *dmscondls = dmscond + nset*nbas*nbas*LS;
        /* each group of nset matrices is n2c x n2c complex */
        double complex *dmll = dm + n2c*n2c*LL*nset;
        double complex *dmss = dm + n2c*n2c*SS*nset;
        double complex *dmsl = dm + n2c*n2c*SL*nset;
        //double complex *dmls = dm + n2c*n2c*LS*nset;
        set_dmcond(dmcondll, dmscondll, dmll,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondss, dmscondss, dmss,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
        set_dmcond(dmcondsl, dmscondsl, dmsl,
                   opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
sph.h | #ifndef SPH_H
#define SPH_H
#include <omp.h>
#include <vector>
#include <cstdlib>
#include <cmath>
#include <glm/vec3.hpp>
#include <glm/mat3x3.hpp>
#include <glm/geometric.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtx/scalar_multiplication.hpp>
#include "sphparticle.h"
#include "sphmap.h"
#define LOG 0
#define SHOW_FPRES 0
#define SHOW_FVISC 0
#define PI 3.14159265358979323846264338327950288
using namespace std;
using namespace glm;
/**
 * SPHSystem — weakly compressible SPH fluid solver.
 *
 * Particles are seeded on a jittered lattice clipped to a sphere, hashed
 * into a spatial grid for neighbor queries, and advanced by pressure,
 * viscosity, gravity and container-penalty forces.  The smoothing kernel
 * is the cubic spline with 3D normalization 3/(2*pi).
 *
 * Fixes relative to the previous revision:
 *  - applyGForces(): the gravity vector was declared `vec3 g(0.0f, -g, 0.0f)`,
 *    so the local shadowed the member and its initializer read the
 *    uninitialized local (undefined behavior).  The local is renamed.
 *  - constructor: the jitter vector was named `rand`, shadowing ::rand
 *    inside its own initializer; renamed to `jitter`.
 *  - computeBasics()/computeForces(): removed the nested
 *    `#pragma omp parallel for` regions.  They are already called from a
 *    parallel loop in build(), and the unprotected `dPres += ...` /
 *    `ddVisc += ...` accumulations would be data races if OpenMP nesting
 *    were ever enabled.
 */
class SPHSystem {
private:
    SpatialHashTable* table;    // spatial hash used for neighbor queries
    vector<Particle*> Ps;       // all particles
    vec3 spos;                  // seeding origin
    vec3 size;                  // particles per axis at seeding time
    vec3 gap;                   // lattice spacing per axis
    vec3 m_d;                   // hash-grid cell size (vec3 built from a scalar)
    vec3 container_lb;          // container lower corner
    vec3 container_ub;          // container upper corner
    float k;                    // pressure stiffness (Tait equation)
    float g;                    // gravity magnitude, applied along -y
    float density0;             // rest density
    float supportRadius;
    float smoothingRadius;
    float penalty;              // container penalty-force stiffness
    float viscosity;

    // Cubic-spline kernel shape f(q), q = r/h, normalized by 3/(2*pi) for 3D.
    float f(float q) {
        float ans;
        if (0 <= q && q < 1)
            ans = 2.0 / 3.0 - pow(q, 2) + 0.5 * pow(q, 3);
        else if (1 <= q && q < 2)
            ans = 1.0 / 6.0 * pow(2 - q, 3);
        else
            ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }

    // Derivative of the spline shape df/dq, same normalization as f().
    float df(float q) {
        float ans;
        if (0 <= q && q < 1)
            ans = -2 * q + 3.0 / 2.0 * pow(q, 2);
        else if (1 <= q && q < 2)
            ans = -0.5 * pow(2 - q, 2);
        else
            ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }

    // Kernel value W(|x_i - x_j|; h) = f(r/h) / h^3.
    float W(Particle* x_i, Particle* x_j, float smoothingRadius) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smoothingRadius;
        return 1.0 / pow(smoothingRadius, 3) * f(q);
    }

    // Kernel gradient, directed from x_j toward x_i.
    // NOTE(review): the direction is normalized by hand, so r == 0 yields NaN;
    // callers apparently never pair a particle with itself — confirm.
    vec3 dW(Particle* x_i, Particle* x_j, float smoothingRadius) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smoothingRadius;
        vec3 dir = x_i->getPosition() - x_j->getPosition();
        dir = dir / sqrt(dot(dir, dir));
        return 1.0 / pow(smoothingRadius, 4) * df(q) * dir;
    }

public:
    /**
     * Seed a spherical blob of particles on a jittered lattice.
     * Particle mass is derived from the rest density and the support radius.
     */
    SPHSystem(vec3 pos, vec3 size, vec3 gap, float m_d, vec3 container_lb, vec3 container_ub,
              float g, float penalty, float k, float density0, float supportRadius, float smoothingRadius, float viscosity) :
        spos(pos), size(size), gap(gap), m_d(m_d), container_lb(container_lb), container_ub(container_ub),
        g(g), penalty(penalty), k(k), density0(density0), supportRadius(supportRadius), smoothingRadius(smoothingRadius), viscosity(viscosity)
    {
        this->table = new SpatialHashTable(m_d);
        float mass = pow(supportRadius / 2.0, 3) * density0; // pow(2.0 / 3.0 * smoothingRadius, 3) * density0;
        vec3 center((size.x - 1) * gap.x / 2, (size.y - 1) * gap.y / 2, (size.z - 1) * gap.z / 2);
        for (int ix = 0; ix < size[0]; ix++) {
            for (int iy = 0; iy < size[1]; iy++) {
                for (int iz = 0; iz < size[2]; iz++) {
                    vec3 ppos(gap[0] * ix, gap[1] * iy, gap[2] * iz);
                    // keep only lattice points inside a sphere around the center
                    if (distance(ppos, center) > (size.x - 1) * gap.x / 2 + 0.1) continue;
                    // small random offset breaks the perfect lattice
                    // (was named `rand`, which shadowed ::rand in its own initializer)
                    vec3 jitter(0.02 * rand() / RAND_MAX, 0.02 * rand() / RAND_MAX, 0.02 * rand() / RAND_MAX);
                    ppos = ppos + pos + jitter;
                    Ps.push_back(new Particle(ppos, mass));
                }
            }
        }
    }

    // Rebuild the spatial hash and recompute neighbors, densities,
    // pressures and SPH forces for every particle.
    void build() {
        table->clear();
        table->build(Ps);
        for (int i = 0; i < Ps.size(); i++)
            computeNeighborBlocks(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            computeNeighbors(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            computeBasics(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            computeForces(Ps[i]);
    }

    void computeNeighborBlocks(Particle* p) {
        table->neighborBlocks(p);
    }

    void computeNeighbors(Particle* p) {
        table->neighbors(p);
        p->setBuiltNeighborsFlag(true);
    }

    // Density (kernel sum over neighbors) and Tait-equation pressure.
    // Runs serially: it is already invoked from a parallel loop in build().
    void computeBasics(Particle* p) {
        vector<Particle*>* neighbors = p->getNeighbors(true);
        float dens = 0;
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            dens += p_j->getMass() * W(p, p_j, smoothingRadius);
        }
        float pres = k * (pow(dens / density0, 7) - 1);
        p->setDensity(dens);
        p->setPressure(pres);
        p->setBuiltBasicsFlag(true);
    }

    // Pressure and viscosity forces for one particle (symmetric SPH forms).
    // Runs serially — see computeBasics(); the accumulations below would
    // race under a nested parallel-for.
    void computeForces(Particle* p) {
        vec3 pos = p->getPosition();
        vec3 vel = p->getVelocity();
        float mass = p->getMass();
        float dens = p->getDensity();
        float pres = p->getPressure();
        vector<Particle*>* neighbors = p->getNeighbors(true);
        // -----------------------
        // compute F_pressure
        // -----------------------
        vec3 dPres(0, 0, 0);
        if (SHOW_FPRES) {
            vec3 tpos = p->getPosition();
            printf("stats of %.0f-%.0f-%.0f:\tmass: %.2f\tdensity: %.2f\tpressure: %.2f\n",
                tpos.x, tpos.y, tpos.z, mass, dens, pres);
        }
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            float mass_j = p_j->getMass();
            float dens_j = p_j->getDensity();
            float pres_j = p_j->getPressure();
            vec3 dW_ij = dW(p, p_j, smoothingRadius);
            vec3 dPres_j = mass_j * (pres / pow(dens, 2) + pres_j / pow(dens_j, 2)) * dW_ij;
            dPres += dPres_j;
            if (SHOW_FPRES) {
                vec3 tpos = p_j->getPosition();
                printf("\tstats of %.0f-%.0f-%.0f:\tmass: %.2f density: %.2f pressure: %.2f param: %.2f dW: %.2f-%.2f-%.2f dPres: %.2f-%.2f-%.2f\n",
                    tpos.x, tpos.y, tpos.z,
                    mass_j, dens_j, pres_j,
                    mass_j * (pres / pow(dens, 2) + pres_j / pow(dens_j, 2)),
                    dW_ij.x, dW_ij.y, dW_ij.z,
                    dPres_j.x, dPres_j.y, dPres_j.z
                );
            }
        }
        vec3 Fpres = - mass * dPres;
        if (SHOW_FPRES) {
            printf("dpres: %.4f %.4f %.4f\n", dPres.x, dPres.y, dPres.z);
            printf("Fpres: %.4f %.4f %.4f\n", Fpres.x, Fpres.y, Fpres.z);
        }
        // -----------------------
        // compute F_viscosity
        // -----------------------
        vec3 ddVisc(0, 0, 0);
        if (SHOW_FVISC) {
            vec3 tpos = pos;
            printf("stats of %.0f-%.0f-%.0f:\tmass: %.2f\tvel: %.2f-%.2f-%.2f\n",
                tpos.x, tpos.y, tpos.z, mass, vel.x, vel.y, vel.z);
        }
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            float mass_j = p_j->getMass();
            float dens_j = p_j->getDensity();
            vec3 x_ij = pos - p_j->getPosition();
            vec3 v_ij = vel - p_j->getVelocity();
            vec3 dW_ij = dW(p, p_j, smoothingRadius);
            // 0.01*h^2 term regularizes the denominator near r = 0
            vec3 ddVisc_j = mass_j / dens_j * v_ij * (dot(x_ij, dW_ij) / (dot(x_ij, x_ij) + 0.01 * pow(smoothingRadius, 2)));
            ddVisc += ddVisc_j;
            if (SHOW_FVISC) {
                vec3 tpos = p_j->getPosition();
                printf("\tstats of %.0f-%.0f-%.0f:\tmass: %.2f density: %.2f dW: %.2f-%.2f-%.2f ddVisc: %.2f-%.2f-%.2f\n",
                    tpos.x, tpos.y, tpos.z,
                    mass_j, dens_j,
                    dW_ij.x, dW_ij.y, dW_ij.z,
                    ddVisc_j.x, ddVisc_j.y, ddVisc_j.z
                );
            }
        }
        vec3 Fvisc = 2 * mass * viscosity * ddVisc;
        if (SHOW_FVISC) {
            printf("ddVisc: %.4f %.4f %.4f\n", ddVisc.x, ddVisc.y, ddVisc.z);
            printf("Fvisc: %.4f %.4f %.4f\n", Fvisc.x, Fvisc.y, Fvisc.z);
        }
        p->setFpres(Fpres);
        p->setFvisc(Fvisc);
        p->setBuiltForcesFlag(true);
    }

    // Apply gravity as a permanent per-particle force.
    // BUG FIX: the local was previously declared `vec3 g(0.0f, -g, 0.0f)`,
    // shadowing the member and reading the uninitialized local.
    void applyGForces() {
        vec3 gravity(0.0f, -g, 0.0f);
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++)
            Ps[i]->applyPermForce(Ps[i]->getMass() * gravity);
    }

    void applyTempForces(vec3& f) {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            Ps[i]->applyTempForce(f);
    }

    void clearTempForces() {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            Ps[i]->clearTempForce();
    }

    // Move the previously computed pressure/viscosity forces into each
    // particle's temporary force accumulator.
    void applySPHForces() {
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++) {
            Ps[i]->applyTempForce(Ps[i]->getFpres());
            Ps[i]->applyTempForce(Ps[i]->getFvisc());
        }
    }

    void setContainer(vec3 container_lb, vec3 container_ub) {
        this->container_lb = container_lb;
        this->container_ub = container_ub;
    }

    // Linear penalty force pushing particles back inside the axis-aligned
    // container box (one term per violated face).
    void applyPenaltyForces() {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++) {
            vec3 pos = Ps[i]->getPosition();
            if (pos.y <= container_lb.y) {
                vec3 penalty_force(0, -penalty * (pos.y - container_lb.y), 0);
                Ps[i]->applyTempForce(penalty_force);
            }
            if (pos.y > container_ub.y) {
                vec3 penalty_force(0, -penalty * (pos.y - container_ub.y), 0);
                Ps[i]->applyTempForce(penalty_force);
            }
            if (pos.x <= container_lb.x) {
                vec3 penalty_force(-penalty * (pos.x - container_lb.x), 0, 0);
                Ps[i]->applyTempForce(penalty_force);
            }
            if (pos.x > container_ub.x) {
                vec3 penalty_force(-penalty * (pos.x - container_ub.x), 0, 0);
                Ps[i]->applyTempForce(penalty_force);
            }
            if (pos.z <= container_lb.z) {
                vec3 penalty_force(0, 0, -penalty * (pos.z - container_lb.z));
                Ps[i]->applyTempForce(penalty_force);
            }
            if (pos.z > container_ub.z) {
                vec3 penalty_force(0, 0, -penalty * (pos.z - container_ub.z));
                Ps[i]->applyTempForce(penalty_force);
            }
        }
    }

    int getSize() { return Ps.size(); }

    vector<Particle*>* getParticles() { return &Ps; }

    // Copy particle positions into pos as x,y,z triples.
    void getPositions(float* pos) {
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++) {
            vec3 ppos = Ps[i]->getPosition();
            for (int j = 0 ; j < 3 ; j++) pos[3 * i + j] = ppos[j];
        }
    }

    // Copy particle velocities into vel as x,y,z triples.
    void getVelocities(float* vel) {
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++) {
            vec3 pvel = Ps[i]->getVelocity();
            for (int j = 0 ; j < 3 ; j++) vel[3 * i + j] = pvel[j];
        }
    }

    // Pack [position, position + temp acceleration] per particle (6 floats).
    void getPosAcc(float* posacc) {
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++) {
            vec3 ppos = Ps[i]->getPosition();
            vec3 pacc = ppos + Ps[i]->computeTempAcceleration();
            for (int j = 0 ; j < 3 ; j++) posacc[6 * i + 0 + j] = ppos[j];
            for (int j = 0 ; j < 3 ; j++) posacc[6 * i + 3 + j] = pacc[j];
        }
    }

    // Debug helper: dump every particle position to stdout.
    void printPositions()
    {
        printf("\n");
        for (int i = 0 ; i < Ps.size() ; i++) {
            vec3 pos = Ps[i]->getPosition();
            printf("%.3f-%.3f-%.3f ", pos[0], pos[1], pos[2]);
        }
    }
};
#endif |
bm2d_mdev_iteration.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "homp.h"
#include "bm2d.h"
/*
 * Device-side launcher for the bm2d stencil kernel.
 * Unpacks the offload arguments, resolves the device-resident u/uold/coeff
 * buffers, selects the kernel wrapper matching the device type, and runs
 * num_its iterations, ping-ponging u and uold each iteration.  Per-iteration
 * kernel time is accumulated into the acc_kernel_exe_event_index event.
 */
void bm2d_omp_mdev_iteration_launcher(omp_offloading_t *off, void *args) {
    struct bm2d_off_args * iargs = (struct bm2d_off_args*) args;
    long n = iargs->n;
    long m = iargs->m;
    int maxwin = iargs->maxwin;
    int num_its = iargs->num_its;
    long u_dimX = iargs->u_dimX;
    long u_dimY = iargs->u_dimY;
    int coeff_dimX = iargs->coeff_dimX;
    /* NOTE(review): the trailing comments below look like stale copy-paste;
     * all three lookups pass -1, not the literal map indices mentioned. */
    omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1); /* 1 is for the map u */
    omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1); /* 2 is for the map uld */
    omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1); /* 2 is for the map uld */
    REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
    REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
    REAL *coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
    /* shift coeff to its center element, mirroring the host-side layout */
    coeff = coeff + (2*maxwin+1) * maxwin + maxwin; /* TODO this should be a call to map a host-side address to dev-side address*/
    long it; /* iteration */
#if CORRECTNESS_CHECK
    printf("kernel launcher: u: %X, uold: %X\n", u, uold);
    print_array("u in device: ", "udev", u, n, m);
    print_array("uold in device: ", "uolddev", uold, n, m);
#endif
    long offset;
    long start;
    long len;
    /* row-wise dist */
    offset = omp_loop_get_range(off, 0, &start, &len);
#if 0
    /* col-wise dist */
    omp_loop_get_range(off, 0, &start, &len);
    /* row/col-wise dist */
    omp_loop_get_range(off, 0, &start, &len); /* todo */
    omp_loop_get_range(off, 0, &start, &len); /* todo */
#endif
    omp_device_type_t devtype = off->dev->type;
    /* pick the kernel implementation matching this device; NOTE(review):
     * if NVGPU/ITLMIC support is compiled out, the pointer stays
     * uninitialized for those device types — presumably unreachable in
     * such builds, but worth confirming. */
    void (*bm2d_kernel_wrapper)(omp_offloading_t *off, long start_n, long len_n, long n, long m, long u_dimX,
                                long u_dimY, REAL *u, REAL *uold, int maxwin, int coeff_dimX, REAL *coeff);
    if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
        bm2d_kernel_wrapper = bm2d_nvgpu_cuda_wrapper;
#endif
    } else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined(DEVICE_ITLMIC_SUPPORT)
        bm2d_kernel_wrapper = bm2d_itlmic_wrapper;
#endif
    } else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
        bm2d_kernel_wrapper = bm2d_cpu_omp_wrapper;
    } else {
        fprintf(stderr, "device type is not supported for this call\n");
        abort();
    }
    //printf("dev: %d, offset: %d, length: %d, local start: %d, u: %X, uold: %X, coeff-center: %X\n", off->devseqid, offset, len, start, u, uold, coeff);
    //#pragma omp parallel shared(n, m, maxwin, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold)
    omp_event_t *events = off->events;
    omp_dev_stream_t *stream = off->stream;
    omp_offloading_info_t * off_info = off->off_info;
    /* close the runtime-opened timing interval before per-iteration timing */
    omp_event_record_stop(&events[acc_kernel_exe_event_index]); /* match the runtime start */
    omp_event_accumulate_elapsed_ms(&events[acc_kernel_exe_event_index], 0);
    for (it = 0; it < num_its; it++) {
        omp_event_record_start(&events[acc_kernel_exe_event_index]);
        /* ping-pong: even iterations write u from uold, odd ones the reverse */
        REAL * uu;
        REAL * uuold;
        if (it % 2 == 0) {
            uu = u;
            uuold = uold;
        } else {
            uu = uold;
            uuold = u;
        }
        bm2d_kernel_wrapper(off, start, len, n, m, u_dimX, u_dimY, uu, uuold, maxwin, coeff_dimX, coeff);
        //printf("iteration %d by dev: %d\n", it, off->dev->id);
        omp_event_record_stop(&events[acc_kernel_exe_event_index]);
        omp_event_accumulate_elapsed_ms(&events[acc_kernel_exe_event_index], 0);
    }
    /* match the runtime stop */
    omp_event_record_start(&events[acc_kernel_exe_event_index]);
}
/*
 * Host-side driver: set up the multi-device offload (topology, data maps,
 * distributions), copy u/uold/coeff to the devices, run the bm2d kernel
 * offload num_runs times, copy results back, and return the total elapsed
 * time (init + copy-to + kernel + copy-from) in milliseconds.
 * The disabled #if 0 sections keep alternative distribution strategies
 * (block, column-wise, row/col-wise, halo exchange) for reference.
 */
double bm2d_omp_mdev_iterate(int ndevs, int *targets, long n, long m, REAL *u, int maxwin, REAL *coeff,
                             int num_its) {
    /* interior n x m plus a maxwin-wide ghost border on each side */
    long u_dimX = n + 2 * maxwin;
    long u_dimY = m + 2 * maxwin;
    int coeff_dimX = 2*maxwin+1;
    REAL * coeff_center = coeff + (2*maxwin+1) * maxwin + maxwin; /* let coeff point to the center element */
    /* NOTE(review): omp_unified_malloc result is not checked before memcpy */
    REAL *uold = (REAL *) omp_unified_malloc(sizeof(REAL) * u_dimX * u_dimY);
    memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY);
    //print_array("Before offloading", "u", u, u_dimX, u_dimY);
    double off_init_time = read_timer_ms();
    /**************************************** dist-specific *****************************************/
    int __top_ndims__ = 1;
    /* TODO: to use row/col-wise dist, __top_ndims__ should be set to 2 */
    omp_grid_topology_t * __top__ = omp_grid_topology_init(ndevs, targets, __top_ndims__);
    /* init other infos (dims, periodic, idmaps) of top if needed */
    int __num_maps__ = 3; /* u, uold and the coeff */ /* XXX: need compiler output */
    /* data copy offloading */
    omp_offloading_info_t *__copy_data_off_info__ =
            omp_offloading_init_info("data_copy", __top__, 1, OMP_OFFLOADING_DATA, __num_maps__, NULL, NULL, 0);
    /* stencil kernel offloading */
    struct bm2d_off_args off_args;
    off_args.n = n; off_args.m = m; off_args.u = u; off_args.maxwin = maxwin; off_args.coeff = coeff; off_args.num_its = num_its;
    off_args.uold = uold; off_args.coeff_center = coeff_center; off_args.coeff_dimX = coeff_dimX; off_args.u_dimX = u_dimX; off_args.u_dimY = u_dimY;
    omp_offloading_info_t * __off_info__ =
            omp_offloading_init_info("bm2d_kernel", __top__, 0, OMP_OFFLOADING_CODE, 0,
                                     bm2d_omp_mdev_iteration_launcher, &off_args, 1);
    omp_offloading_append_profile_per_iteration(__off_info__, 13*u_dimY, 7, 1);
    //printf("data copy off: %X, bm2d off: %X\n", __copy_data_off_info__, __off_info__);
    /* u map info */
    omp_data_map_info_t *__u_map_info__ = &__copy_data_off_info__->data_map_info[0];
    omp_data_map_init_info("u", __u_map_info__, __copy_data_off_info__, u, 2, sizeof(REAL), OMP_DATA_MAP_TOFROM, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_2d(__u_map_info__, u_dimX, u_dimY);
    /* uold map info: TO only — it is scratch on the device side */
    omp_data_map_info_t *__uold_map_info__ = &__copy_data_off_info__->data_map_info[1];
    omp_data_map_init_info("uold", __uold_map_info__, __copy_data_off_info__, uold, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_2d(__uold_map_info__, u_dimX, u_dimY);
    /* coeff map info: small stencil-coefficient block, replicated in full */
    omp_data_map_info_t *__coeff_map_info__ = &__copy_data_off_info__->data_map_info[2];
    omp_data_map_init_info("coeff", __coeff_map_info__, __copy_data_off_info__, coeff, 2, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_2d(__coeff_map_info__, coeff_dimX, coeff_dimX);
    omp_data_map_dist_init_info(__coeff_map_info__, 0, OMP_DIST_POLICY_FULL, 0, coeff_dimX, 0, 0);
    omp_data_map_dist_init_info(__coeff_map_info__, 1, OMP_DIST_POLICY_FULL, 0, coeff_dimX, 0, 0);
    /**************************************** dist-specific *****************************************/
    /* row-wise distribution */
#if 0
    /* BLOCK_BLOCK */
    omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
    omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
    //printf("BLOCK dist policy for arrays and loop dist\n");
    /* BLOCK_ALIGN */
    omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
    omp_loop_dist_align_with_data_map(__off_info__, 0, 0, __u_map_info__, 0);
    //printf("BLOCK dist policy for arrays, and loop dist align with array A row dist\n");
#endif
    /* AUTO_ALIGN: distribute the loop, then align u's row dist with it */
    omp_loop_dist_init_info(__off_info__, 0, LOOP_DIST_POLICY, 0, n, LOOP_DIST_CHUNK_SIZE, 0);
    omp_data_map_dist_align_with_loop(__u_map_info__, 0, maxwin, __off_info__, 0);
    //printf("AUTO dist policy for loop dist and array align with loops\n");
    /* used by all row-wise dist */
    omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_FULL, 0, u_dimY, 0, 0);
    omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
    omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, OMP_ALIGNEE_OFFSET, __u_map_info__, OMP_ALL_DIMENSIONS);
#if 0
    /* col-wise distribution */
    omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_FULL, maxwin, n, 0, 0);
    omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
    omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
    omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
    omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0, 0);
    /* row/col-wise distribution */
    omp_data_map_dist_init_info(__u_map_info__, 0, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 0);
    omp_data_map_dist_init_info(__u_map_info__, 1, OMP_DIST_POLICY_BLOCK, maxwin, n, 0, 1);
    omp_map_add_halo_region(__u_map_info__, 0, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
    omp_map_add_halo_region(__u_map_info__, 1, maxwin, maxwin, OMP_DIST_HALO_EDGING_REFLECTING);
    omp_data_map_dist_align_with_data_map_with_halo(__uold_map_info__, OMP_ALL_DIMENSIONS, 0, __u_map_info__, OMP_ALL_DIMENSIONS);
    omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
    omp_loop_dist_init_info(__off_info__, 1, OMP_DIST_POLICY_BLOCK, 0, m, 0, 1);
#endif
#if 0
    /* halo exchange offloading */
    omp_data_map_halo_exchange_info_t x_halos[1];
    x_halos[0].map_info = __u_map_info__; x_halos[0].x_direction = OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT; /* u and uold*/
    /* row-wise dist */
    x_halos[0].x_dim = 0;
#if 0
    /* col-wise dist */
    x_halos[0].x_dim = 1;
    /* row/col-wise dist */
    x_halos[0].x_dim = -1; /* means all the dimension */
#endif
    omp_offloading_append_data_exchange_info(__off_info__, x_halos, 1);
#endif
    /************************************************************************************************/
    off_init_time = read_timer_ms() - off_init_time;
    /*********** NOW notifying helper thread to work on this offload ******************/
#if DEBUG_MSG
    printf("=========================================== offloading to %d targets ==========================================\n", __num_target_devices__);
#endif
    double off_copyto_time = read_timer_ms();
    double start_time = off_copyto_time;
    omp_offloading_start(__copy_data_off_info__);
    off_copyto_time = read_timer_ms() - off_copyto_time;
    omp_print_map_info(__u_map_info__);
    omp_print_map_info(__uold_map_info__);
    omp_print_map_info(__coeff_map_info__);
    // printf("offloading from stencil now\n");
    /* run the kernel offload num_runs times and average the time */
    double off_kernel_time = read_timer_ms();
    int it;
    int num_runs = 10;
    for (it=0; it< num_runs; it++) omp_offloading_start(__off_info__);
    off_kernel_time = (read_timer_ms() - off_kernel_time)/ num_runs;
    /* copy back u from each device and free others */
    double off_copyfrom_time = read_timer_ms();
    omp_offloading_start(__copy_data_off_info__);
    off_copyfrom_time = read_timer_ms() - off_copyfrom_time;
    double off_total = off_init_time + off_copyto_time + off_copyfrom_time + off_kernel_time;
#if defined (OMP_BREAKDOWN_TIMING)
    /* not reporting status for data copy */
    //omp_offloading_info_report_profile(__copy_data_off_info__);
    omp_offloading_info_report_profile(__off_info__, num_runs);
    //omp_offloading_info_t *infos[2];
    //infos[0] = __copy_data_off_info__;
    //infos[1] = __off_info__;
    //omp_offloading_info_sum_profile(infos, 2, start_time, start_time+off_total);
    //omp_offloading_info_report_profile(__copy_data_off_info__);
#endif
    omp_offloading_fini_info(__copy_data_off_info__);
    omp_offloading_fini_info(__off_info__);
    omp_grid_topology_fini(__top__);
    omp_unified_free(uold);
    return off_total;
}
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a bundled solver-settings object.
/** Delegates construction of the internal velocity/pressure strategies
 * to InitializeStrategy() (defined elsewhere in this class).
 * @param rModelPart fluid model part to be solved
 * @param rSolverConfig bundled velocity/pressure solver configuration
 */
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Constructor from explicit velocity and pressure linear solvers.
/** Builds the two inner Gauss-Seidel linear strategies used by the
 * fractional-step scheme:
 *  - momentum (velocity) system: elimination builder-and-solver;
 *  - continuity (pressure) system: block builder-and-solver.
 * Both use a static incremental-update scheme and have reaction
 * computation disabled; the DofSet is managed by this strategy, so the
 * auxiliary strategies are created with ReformDofAtEachIteration=false.
 * @param rModelPart fluid model part
 * @param pVelocityLinearSolver linear solver for the momentum system
 * @param pPressureLinearSolver linear solver for the continuity system
 * @param ReformDofSet if true, Clear() is invoked after each solution step
 * @param VelTol relative velocity convergence tolerance
 * @param PresTol relative pressure convergence tolerance
 * @param MaxPressureIterations base number of nonlinear iterations
 * @param TimeOrder BDF time-integration order (1 or 2)
 * @param DomainSize spatial dimension (2 or 3)
 */
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
// NOTE(review): this local typedef shadows the class-level BaseType with
// an identical type; it is redundant but harmless.
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
// The same static scheme instance is shared by both inner strategies.
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor (no-op; member strategy pointers presumably reference-counted
/// Kratos pointers that release automatically — TODO confirm).
virtual ~TwoStepVPStrategy() {}
/// Verify the model part configuration before solving.
/** Runs the base-class check, verifies that DELTA_TIME and
 * BDF_COEFFICIENTS are registered (non-zero keys), that the model-part
 * buffer is large enough for the requested time order (3 for BDF2,
 * 2 for backward Euler), and calls Check() on every element.
 * Condition checks are currently disabled (commented out below).
 * @return 0 on success; otherwise the first non-zero error code found
 */
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Stop at the first element reporting an error.
for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// Solve one coupled velocity-pressure time step.
/** Alternates momentum and continuity solves for up to an adaptive
 * number of nonlinear iterations. The iteration budget is multiplied
 * by 3 within the first 10 steps, by 2 within the next 10, and by 2
 * after a time-interval change. Convergence requires BOTH systems
 * converged AND at least 3 iterations completed (it > 2).
 * Stress/strain state is updated once convergence (or the last
 * iteration) is reached.
 * @return true if both momentum and continuity converged
 */
bool SolveSolutionStep() override
{
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("\n Solution with two_step_vp_strategy at t=") << currentTime << "s" << std::endl;
// Enlarge the iteration budget when the time step was recently changed.
if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
this->SetBlockedFlag();
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// Skip the pressure solve when the time-step controller froze the step.
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
this->UpdateStressStrain();
}
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
return converged;
}
/// Intentionally empty: overrides the base behavior with a no-op.
void FinalizeSolutionStep() override
{
}
/// Intentionally empty: per-step initialization of the inner strategies
/// is performed lazily inside the first nonlinear iteration instead.
void InitializeSolutionStep() override
{
}
/// Update nodal displacements and move the mesh.
/** The boundary-normal recomputation is currently disabled (commented
 * out), so both parameters are presently unused and kept only for the
 * original interface.
 * @param rModelPart unused while the normals computation is disabled
 * @param echoLevel unused while the normals computation is disabled
 */
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
/// Classify elements as BLOCKED and/or ISOLATED before the nonlinear loop.
/** For every element (in parallel over OpenMP partitions):
 *  - resets BLOCKED and ISOLATED,
 *  - counts free-surface, free-surface+rigid, rigid and isolated nodes
 *    (a node is "isolated" when it belongs to exactly one element),
 *  - in 3D, flags nearly-degenerate tetrahedra whose face normals are
 *    almost parallel as BLOCKED, with a cosine threshold that tightens
 *    (0.99 / 0.995 / 0.999) as the number of isolated nodes decreases,
 *  - marks fully free-surface, non-rigid elements whose nodes are
 *    (almost) all isolated as ISOLATED and un-blocks them.
 */
void SetBlockedFlag()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
std::vector<array_1d<double, 3>> nodesCoordinates;
nodesCoordinates.resize(numNodes);
(itElem)->Set(BLOCKED, false);
(itElem)->Set(ISOLATED, false);
unsigned int freeSurfaceNodes = 0;
unsigned int freeSurfaceRigidNodes = 0;
unsigned int rigidNodes = 0;
unsigned int isolatedNodes = 0;
for (unsigned int i = 0; i < numNodes; i++)
{
if (itElem->GetGeometry()[i].Is(FREE_SURFACE))
{
freeSurfaceNodes++;
if (itElem->GetGeometry()[i].Is(RIGID))
{
freeSurfaceRigidNodes++;
}
}
else if (itElem->GetGeometry()[i].Is(RIGID))
{
rigidNodes++;
}
nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates();
ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS);
// A node with a single neighbouring element is considered isolated.
if (neighb_elems.size() == 1)
{
isolatedNodes++;
}
}
// if (dimension == 3 && (freeSurfaceNodes == numNodes || (freeSurfaceNodes + rigidNodes) == numNodes))
if (dimension == 3)
{
// Face normals of the tetrahedron via cross products of edge vectors.
double a1 = 0; //slope x for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double b1 = 0; //slope y for plane on the first triangular face of the tetrahedra (nodes A,B,C)
double c1 = 0; //slope z for plane on the first triangular face of the tetrahedra (nodes A,B,C)
a1 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a2 = 0; //slope x for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double b2 = 0; //slope y for plane on the second triangular face of the tetrahedra (nodes A,B,D)
double c2 = 0; //slope z for plane on the second triangular face of the tetrahedra (nodes A,B,D)
a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a3 = 0; //slope x for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double b3 = 0; //slope y for plane on the third triangular face of the tetrahedra (nodes B,C,D)
double c3 = 0; //slope z for plane on the third triangular face of the tetrahedra (nodes B,C,D)
a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[1][2] - nodesCoordinates[2][2]);
b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]);
c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]);
double a4 = 0; //slope x for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double b4 = 0; //slope y for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
double c4 = 0; //slope z for plane on the fourth triangular face of the tetrahedra (nodes A,C,D)
a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]);
b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]);
c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]);
// Cosine of the angle between each pair of face normals.
// NOTE(review): a degenerate (zero-area) face makes the divisor 0 and
// the cosine NaN/inf; NaN comparisons below evaluate false, so such an
// element is simply not blocked — confirm this is the intended outcome.
double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)));
double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle24 = (a4 * a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
// Blocking thresholds tighten as fewer nodes are isolated.
if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes))
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
// else if (fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999)
// {
// (itElem)->Set(BLOCKED, true);
// // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
// }
}
if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1))
{
(itElem)->Set(ISOLATED, true);
(itElem)->Set(BLOCKED, false);
}
}
}
KRATOS_CATCH("");
}
/// Deactivate sliver (near-zero measure) elements.
/** Flags simplex elements (triangle in 2D, tetrahedron in 3D) whose
 * area/volume falls below 0.1% of the mean elemental volume as
 * inactive so they are skipped by the builder; elements at or above
 * the threshold are (re-)activated. Runs in parallel over OpenMP
 * element partitions.
 */
void UnactiveSliverElements()
{
  KRATOS_TRY;
  ModelPart &rModelPart = BaseType::GetModelPart();
  const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
  MesherUtilities MesherUtils;
  double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
  // Threshold: 0.1% of the average elemental volume.
  double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
#pragma omp parallel
  {
    ModelPart::ElementIterator ElemBegin;
    ModelPart::ElementIterator ElemEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      unsigned int numNodes = itElem->GetGeometry().size();
      // Only simplices are considered for sliver detection.
      if (numNodes == (dimension + 1))
      {
        // Fix: declared inside the loop. The original declaration sat
        // outside the `#pragma omp parallel` region, so all threads
        // shared (and raced on) a single ElementalVolume variable.
        double ElementalVolume = 0;
        if (dimension == 2)
        {
          ElementalVolume = (itElem)->GetGeometry().Area();
        }
        else if (dimension == 3)
        {
          ElementalVolume = (itElem)->GetGeometry().Volume();
        }
        if (ElementalVolume < CriticalVolume)
        {
          // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
          (itElem)->Set(ACTIVE, false);
        }
        else
        {
          (itElem)->Set(ACTIVE, true);
        }
      }
    }
  }
  KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
/// Update nodal accelerations and pressure time derivatives.
/** For each node:
 *  - regular fluid nodes: BDF acceleration update via UpdateAccelerations();
 *  - rigid nodes: accelerations zeroed;
 *  - isolated nodes: pressure history zeroed and ballistic update
 *    (gravity drives velocity) when VOLUME_ACCELERATION is available.
 * Then the pressure velocity/acceleration are advanced with finite
 * differences of the pressure history.
 */
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid (wall) nodes carry no acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated free-flying node: wipe pressure history and let the
// body force (if stored) drive its motion.
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
// Incremental update: a_p ends up as (v_p_old - v_p_new)/dt, where
// v_p_old is the value left in buffer 0 by the previous update.
// NOTE(review): this is the negative of the forward difference used
// in CalculatePressureAcceleration() — confirm the sign is intended.
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
/// Update nodal accelerations (no pressure-derivative update).
/** Same per-node classification as CalculateTemporalVariables():
 * regular fluid nodes get the BDF update, rigid nodes get zero
 * accelerations, and isolated nodes get their pressure history wiped
 * and a ballistic velocity update from VOLUME_ACCELERATION if present.
 * NOTE(review): duplicates the acceleration branch of
 * CalculateTemporalVariables(); consider extracting a shared helper.
 */
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
/// BDF nodal acceleration update.
/** Computes a_{n+1} = -BDFcoeffs[1] * (v_{n+1} - v_n) - a_n, consistent
 * with the coefficients assembled in SetTimeCoefficients().
 * noalias() tells ublas the target does not alias the right-hand side,
 * avoiding a temporary.
 */
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
const array_1d<double, 3> &CurrentVelocity,
array_1d<double, 3> &PreviousAcceleration,
const array_1d<double, 3> &PreviousVelocity,
Vector &BDFcoeffs)
{
noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
/// Re-initialize elemental state and advance nodal temporal variables.
/** Calls InitializeSolutionStep() on every element in parallel (the
 * commented-out call suggests this re-evaluates the elemental
 * strain/stress state — confirm in the element implementation), then
 * updates nodal accelerations and pressure derivatives via
 * CalculateTemporalVariables().
 */
void UpdateStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
this->CalculateTemporalVariables();
}
/// Release the internal data of both auxiliary linear strategies.
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// Set the verbosity level, propagating one level less to the
/// auxiliary momentum and pressure strategies (floored at 0).
void SetEchoLevel(int Level) override
{
  BaseType::SetEchoLevel(Level);
  int auxLevel = 0;
  if (Level > 0)
  {
    auxLevel = Level - 1;
  }
  mpMomentumStrategy->SetEchoLevel(auxLevel);
  mpPressureStrategy->SetEchoLevel(auxLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
  return std::string("TwoStepVPStrategy");
}
/// Print information about this object (the class name) to the stream.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "TwoStepVPStrategy";
}
/// Print object's data (intentionally empty: no printable state).
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
KRATOS_TRY;
if (mTimeOrder == 2)
{
//calculate the BDF coefficients
// Variable-step BDF2: Rho is the ratio of the previous to the current
// time step; for constant Dt (Rho = 1) the coefficients reduce to the
// familiar 3/(2Dt), -4/(2Dt), 1/(2Dt).
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
}
else if (mTimeOrder == 1)
{
// Backward Euler: first-order two-point difference.
double Dt = rCurrentProcessInfo[DELTA_TIME];
double TimeCoeff = 1.0 / Dt;
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2, false);
BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt)
BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
}
KRATOS_CATCH("");
}
/// Assemble and solve one momentum (velocity) iteration.
/** Solves the momentum system via the inner strategy, computes the
 * relative velocity error and checks convergence. On iteration 0 the
 * strategy's solution step is initialized and the reference velocity
 * norm is (re)computed. On the last iteration FixTimeStepMomentum()
 * may freeze the time step.
 * @param it current nonlinear iteration (0-based)
 * @param maxIt maximum number of nonlinear iterations
 * @param[out] fixedTimeStep set true when the time-step control froze the step
 * @param[in,out] velocityNorm reference velocity norm (computed when it == 0)
 * @return true if the momentum equations converged
 */
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  int Rank = rModelPart.GetCommunicator().MyPID();
  bool ConvergedMomentum = false;
  double NormDv = 0;
  fixedTimeStep = false;
  // build momentum system and solve for fractional step velocity increment
  rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
  if (it == 0)
  {
    mpMomentumStrategy->InitializeSolutionStep();
  }
  NormDv = mpMomentumStrategy->Solve();
  if (BaseType::GetEchoLevel() > 1 && Rank == 0)
    std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
  if (it == 0)
  {
    velocityNorm = this->ComputeVelocityNorm();
  }
  // Fix: guard against a zero reference norm (e.g. fluid initially at
  // rest). The original divided unconditionally, producing an inf/NaN
  // error norm that poisoned every subsequent convergence check.
  double DvErrorNorm = 0;
  if (velocityNorm != 0)
    DvErrorNorm = NormDv / velocityNorm;
  unsigned int iterationForCheck = 2;
  // Check convergence
  if (it == maxIt - 1)
  {
    KRATOS_INFO("Iteration") << it << " Final Velocity error: " << DvErrorNorm << std::endl;
    ConvergedMomentum = this->FixTimeStepMomentum(DvErrorNorm, fixedTimeStep);
  }
  else if (it > iterationForCheck)
  {
    KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
    ConvergedMomentum = this->CheckMomentumConvergence(DvErrorNorm, fixedTimeStep);
  }
  else
  {
    KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
  }
  if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
    std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
  return ConvergedMomentum;
}
/// Assemble and solve one continuity (pressure) iteration.
/** Solves the pressure system via the inner strategy, computes the
 * relative pressure error and checks convergence. On iteration 0 the
 * strategy's solution step is initialized and the reference pressure
 * norm is (re)computed. On the last iteration FixTimeStepContinuity()
 * is consulted (its fixed-step flag is local and not propagated —
 * preserved from the original interface).
 * @param it current nonlinear iteration (0-based)
 * @param maxIt maximum number of nonlinear iterations
 * @param[in,out] NormP reference pressure norm (computed when it == 0)
 * @return true if the continuity equation converged
 */
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
  ModelPart &rModelPart = BaseType::GetModelPart();
  int Rank = rModelPart.GetCommunicator().MyPID();
  bool ConvergedContinuity = false;
  bool fixedTimeStep = false;
  double NormDp = 0;
  // 2. Pressure solution
  rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
  if (it == 0)
  {
    mpPressureStrategy->InitializeSolutionStep();
  }
  NormDp = mpPressureStrategy->Solve();
  if (BaseType::GetEchoLevel() > 0 && Rank == 0)
    std::cout << "The norm of pressure is: " << NormDp << std::endl;
  if (it == 0)
  {
    NormP = this->ComputePressureNorm();
  }
  // Fix: guard against a zero reference norm (uniform zero pressure
  // field); the unconditional division yielded inf/NaN error norms.
  double DpErrorNorm = 0;
  if (NormP != 0)
    DpErrorNorm = NormDp / NormP;
  // Check convergence
  if (it == (maxIt - 1))
  {
    KRATOS_INFO("Iteration") << it << " Final Pressure error: " << DpErrorNorm << std::endl;
    ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm, fixedTimeStep);
  }
  else
  {
    KRATOS_INFO("Iteration") << it << " Pressure error: " << DpErrorNorm << std::endl;
    ConvergedContinuity = this->CheckContinuityConvergence(DpErrorNorm, fixedTimeStep);
  }
  if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
    std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
  return ConvergedContinuity;
}
void ComputeErrorL2Norm()
{
    // Computes L2-norm errors of the computed velocity, pressure and
    // deviatoric stresses against a manufactured analytical solution
    // evaluated at each element barycenter, and appends "time <tab> error"
    // to one text file per quantity.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    long double sumErrorL2Velocity = 0; // NOTE(review): never accumulated below, so errorL2Velocity is always 0 — TODO confirm intended
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;
    // The reduction clause fixes the data race the previous version had:
    // all threads used to add into the shared accumulators unsynchronized.
#pragma omp parallel reduction(+ : sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            const double area = geometry.Area(); // hoisted: used as the L2 weight below
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            // Interpolate the computed solution at the single Gauss point.
            double elementalPressure = 0;
            double elementalVelocityX = 0;
            double elementalVelocityY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (the /3.0 weight assumes triangles — TODO confirm for 3D meshes).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Analytical (manufactured) solution evaluated at the barycenter.
            long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            long double expectedPressure = -posX * (1.0 - posX);
            long double eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
            long double eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
            long double eleErrorL2Pressure = elementalPressure - expectedPressure;
            sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * area;
            sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * area;
            sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * area;
            // Deviatoric stresses: the elemental values are currently disabled
            // (hard-coded to zero), so these "errors" are the analytical values.
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
            long double nodalErrorTauXX = tauXX - expectedTauXX;
            long double nodalErrorTauYY = tauYY - expectedTauYY;
            long double nodalErrorTauXY = tauXY - expectedTauXY;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * area;
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * area;
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * area;
        }
    }
    long double errorL2Velocity = sqrt(sumErrorL2Velocity);
    long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    long double errorL2Pressure = sqrt(sumErrorL2Pressure);
    long double errorL2TauXX = sqrt(sumErrorL2TauXX);
    long double errorL2TauYY = sqrt(sumErrorL2TauYY);
    long double errorL2TauXY = sqrt(sumErrorL2TauXY);
    // Append one "time <tab> error" record per quantity.
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
    myfileVelocity.close();
    std::ofstream myfileVelocityX;
    myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
    myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
    myfileVelocityX.close();
    std::ofstream myfileVelocityY;
    myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
    myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
    myfileVelocityY.close();
    std::ofstream myfilePressure;
    myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
    myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
    myfilePressure.close();
    std::ofstream myfileTauXX;
    myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
    myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
    myfileTauXX.close();
    std::ofstream myfileTauYY;
    myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
    myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
    myfileTauYY.close();
    std::ofstream myfileTauXY;
    myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
    myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
    myfileTauXY.close();
}
void ComputeErrorL2NormCasePoiseuille()
{
    // L2 errors of the azimuthal velocity and shear stress against the
    // analytical circular-Couette solution (inner radius r_in rotating at
    // omega inside a fixed outer cylinder of radius R_out). Appends
    // "time <tab> vel_error <tab> tau_error" to errorL2Poiseuille.txt.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    double r_in = 0.2;       // inner radius
    double R_out = 0.5;      // outer radius
    double kappa = r_in / R_out;
    double omega = 0.5;      // angular velocity of the inner cylinder
    double viscosity = 100.0;
    // The reduction clause fixes the data race the plain "#pragma omp parallel"
    // of the previous version had on the two shared accumulators.
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            const double area = geometry.Area(); // hoisted: used as the L2 weight below
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            // Interpolate the computed velocity at the single Gauss point.
            double elementalVelocityX = 0;
            double elementalVelocityY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (the /3.0 weight assumes triangles — TODO confirm for 3D meshes).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            // Analytical azimuthal velocity and shear stress at radius rPos.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            // Elemental stresses are currently disabled (hard-coded to zero).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * area;
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * area;
        }
    }
    double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
double ComputeVelocityNorm()
{
    // Global Euclidean norm of the nodal VELOCITY field, summed across MPI
    // ranks. Returns 1.0 for an identically zero field so callers can
    // divide by the result safely.
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
#pragma omp parallel reduction(+ : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            // Removed the unused per-node accumulator (NormVelNode) the
            // previous version computed and discarded.
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00; // guard against division by zero in callers
    return NormV;
}
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormV = 0.00;
errorNormDv = 0;
#pragma omp parallel reduction(+ \
: NormV)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
double NormVelNode = 0;
for (unsigned int d = 0; d < 3; ++d)
{
NormVelNode += Vel[d] * Vel[d];
NormV += Vel[d] * Vel[d];
}
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
if (NormV == 0.0)
NormV = 1.00;
errorNormDv = NormDv / NormV;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
std::cout << "The norm of velocity is: " << NormV << std::endl;
std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
}
/* else{ */
/* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
/* } */
if (errorNormDv < mVelocityTolerance)
{
return true;
}
else
{
return false;
}
}
bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP)
{
    // Computes the relative pressure-increment error ||dp|| / ||p||
    // (output parameters errorNormDp and NormP) and returns true when it
    // is below mPressureTolerance. NormP is MPI-summed; a zero field is
    // replaced by 1.0 to keep the division safe.
    ModelPart &rModelPart = BaseType::GetModelPart();
    NormP = 0.00;
    errorNormDp = 0;
#pragma omp parallel reduction(+ : NormP)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
            NormP += Pr * Pr;
        }
    }
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
    NormP = sqrt(NormP);
    if (NormP == 0.0)
        NormP = 1.00;
    errorNormDp = NormDp / (NormP);
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << " The norm of pressure is: " << NormP << std::endl;
        std::cout << " Pressure error: " << errorNormDp << std::endl;
    }
    // Replaces the former if/else { return true/false } with the condition itself.
    return errorNormDp < mPressureTolerance;
}
double ComputePressureNorm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormP = 0.00;
#pragma omp parallel reduction(+ \
: NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0)
NormP = 1.00;
return NormP;
}
bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep)
{
    // Decides whether the momentum iteration diverged badly enough to give
    // up on this time step. On bad convergence it flags
    // BAD_VELOCITY_CONVERGENCE and, in severe cases, rolls velocity,
    // pressure and acceleration back to the previous step and sets
    // fixedTimeStep. Returns true only when DvErrorNorm < mVelocityTolerance.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool converged = false;
    // Be very permissive during the first few steps, where large errors are expected.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // (DvErrorNorm != DvErrorNorm) is the NaN check. The previous version
    // also tested (DvErrorNorm < 0 && DvErrorNorm > 0), which is always
    // false and has been removed.
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval " << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS " << DvErrorNorm << std::endl;
            fixedTimeStep = true;
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    // Roll the solution back to the previous step's values.
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
            converged = true;
        }
    }
    return converged;
}
bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
    // In-loop variant of FixTimeStepMomentum with a fixed, very loose
    // tolerance: only an error norm close to 1 (or NaN) triggers a rollback
    // of velocity/pressure/acceleration to the previous step's values.
    // Returns true only when DvErrorNorm < mVelocityTolerance.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.99999;
    bool converged = false;
    // (DvErrorNorm != DvErrorNorm) catches NaN. The always-false clause
    // (DvErrorNorm < 0 && DvErrorNorm > 0) of the previous version was removed.
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                // Roll the solution back to the previous step's values.
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
            converged = true;
        }
    }
    return converged;
}
bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep)
{
    // Continuity (pressure) counterpart of FixTimeStepMomentum: on bad
    // convergence sets fixedTimeStep and, for a severe error (> 0.9999),
    // flags BAD_VELOCITY_CONVERGENCE and rolls the solution back to the
    // previous step. BAD_PRESSURE_CONVERGENCE is always cleared at the end
    // (preserved from the original — TODO confirm that is intended).
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool converged = false;
    // Be very permissive during the first few steps.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // NaN is detected via self-comparison; the always-false clause
    // (DvErrorNorm < 0 && DvErrorNorm > 0) of the previous version was removed.
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        fixedTimeStep = true;
        // rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
        if (DvErrorNorm > 0.9999)
        {
            rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
            // Message fixed: it used to say "higher than 0.1" while the test is > 0.9999.
            std::cout << " BAD PRESSURE CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
            std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
            // (duplicate "fixedTimeStep = true;" removed — already set above)
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    // Roll the solution back to the previous step's values.
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else if (DvErrorNorm < mPressureTolerance)
    {
        converged = true;
        fixedTimeStep = false;
    }
    rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    return converged;
}
bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
bool converged = false;
if (DvErrorNorm < mPressureTolerance)
{
converged = true;
fixedTimeStep = false;
}
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
return converged;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
double mVelocityTolerance;
double mPressureTolerance;
unsigned int mMaxPressureIter;
unsigned int mDomainSize;
unsigned int mTimeOrder;
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// One-time configuration from the solver settings: reads the time order,
// domain size and DOF-reform flag, then fetches the velocity and pressure
// solution strategies (with their tolerances / max iterations) from
// rSolverConfig. Throws via KRATOS_THROW_ERROR if either strategy is
// missing from the configuration.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;
    mTimeOrder = rSolverConfig.GetTimeOrder();
    // Check that input parameters are reasonable and sufficient.
    this->Check();
    //ModelPart& rModelPart = this->GetModelPart();
    mDomainSize = rSolverConfig.GetDomainSize();
    mReformDofSet = rSolverConfig.GetReformDofSet();
    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
    // Initialize strategies for each step
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
        /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }
    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }
    // Check input parameters
    // NOTE(review): Check() was already called at the top of this function;
    // the second call looks redundant — TODO confirm before removing.
    this->Check();
    KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (private to forbid copying of the strategy).
// Fix: the empty body flowed off the end of a value-returning function,
// which is undefined behavior if the operator is ever invoked; returning
// *this makes it well-defined while keeping it inaccessible.
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) { return *this; }
/// Copy constructor (private to forbid copying of the strategy).
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
projectq.h | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_
#define MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_
#include <cmath>
#include <functional>
#include <random>
#include <string>
#include <thread>
#include <vector>
#include "backends/projectq/projectq_utils.h"
#include "core/utils.h"
#include "gate/basic_gate.h"
#include "hamiltonian/hamiltonian.h"
#include "pr/parameter_resolver.h"
#include "projectq/backends/_sim/_cppkernels/simulator.hpp"
namespace mindquantum {
namespace projectq {
template <typename T>
class Projectq : public Simulator {
private:
unsigned n_qubits_;
VT<unsigned> ordering_;
unsigned len_;
RndEngine rnd_eng_;
std::function<double()> rng_;
public:
// Default simulator: one qubit in state |0>, RNG seeded with 1.
// len_ = 2^(n_qubits_+1) is the number of interleaved real/imag doubles
// in the state vector (see the vec_[2*i] / vec_[2*i+1] accesses below).
// NOTE(review): the shift overflows for n_qubits_ >= 63 on 64-bit
// unsigned long — presumably never reached in practice; TODO confirm.
Projectq() : Simulator(1, 1), n_qubits_(1), rnd_eng_(1) {
    for (unsigned i = 0; i < n_qubits_; i++) {
        ordering_.push_back(i);
    }
    len_ = (1UL << (n_qubits_ + 1));
    std::uniform_real_distribution<double> dist(0., 1.);
    rng_ = std::bind(dist, std::ref(rnd_eng_));
}
// N-qubit simulator in |0...0> with the given RNG seed.
Projectq(unsigned seed, unsigned N) : Simulator(seed, N), n_qubits_(N), rnd_eng_(seed) {
    for (unsigned i = 0; i < n_qubits_; i++) {
        ordering_.push_back(i);
    }
    len_ = (1UL << (n_qubits_ + 1));
    std::uniform_real_distribution<double> dist(0., 1.);
    rng_ = std::bind(dist, std::ref(rnd_eng_));
}
// N-qubit simulator initialized from an existing amplitude buffer `vec`
// via set_wavefunction — presumably copied rather than adopted; TODO
// confirm ownership semantics in the base Simulator.
Projectq(unsigned seed, unsigned N, calc_type *vec) : Simulator(seed, N), n_qubits_(N), rnd_eng_(seed) {
    for (unsigned i = 0; i < n_qubits_; i++) {
        ordering_.push_back(i);
    }
    len_ = (1UL << (n_qubits_ + 1));
    set_wavefunction(vec, ordering_);
    std::uniform_real_distribution<double> dist(0., 1.);
    rng_ = std::bind(dist, std::ref(rnd_eng_));
}
void InitializeSimulator() {
    // Resets the state vector to |0...0>: releases any existing buffer and
    // allocates a zeroed one of len_ interleaved real/imag doubles.
    free(vec_); // free(NULL) is a no-op, so the former NULL guard was redundant
    vec_ = (StateVector) calloc(len_, sizeof(calc_type));
    // NOTE(review): the calloc result is unchecked; an allocation failure
    // would crash on the next line — TODO consider handling OOM explicitly.
    vec_[0] = 1; // real part of the |0...0> amplitude
}
void InitializeSimulator(const VT<BasicGate<T>> &circ) {
Projectq::InitializeSimulator();
Projectq::ApplyCircuit(circ);
}
// NOTE(review): intentionally(?) empty overload — presumably meant to
// initialize the state from a shared-pointer vector; the body was never
// written. TODO confirm before relying on it.
void InitializeSimulator(CTP<T> vec) {
}
// Overwrites the simulator state with the amplitudes in `vec`.
// NOTE(review): the reinterpret_cast assumes CT<T>'s memory layout matches
// pairs of calc_type (i.e. T == calc_type) and that vec holds 2^n_qubits_
// amplitudes — neither is validated here; TODO confirm for non-double T.
void SetState(VT<CT<T>> vec) {
    set_wavefunction(reinterpret_cast<calc_type *>(vec.data()), ordering_);
}
// Applies a non-parameterized (possibly controlled) gate using its fixed
// base matrix. The base Simulator defers/fuses the update until run().
void ApplyGate(const BasicGate<T> &gate) {
    Projectq::apply_controlled_gate(MCast<T>(gate.base_matrix_.matrix_), VCast(gate.obj_qubits_),
                                    VCast(gate.ctrl_qubits_));
}
// Applies a parameterized gate. The gate's parameter value theta is the
// linear combination of resolver values given by gate.params_. With
// diff == true the gate's derivative matrix (param_diff_matrix_) is
// applied instead of the gate itself — used by the gradient kernels.
void ApplyGate(const BasicGate<T> &gate, const ParameterResolver<T> &pr, bool diff = false) {
    T theta = LinearCombine(pr, gate.params_);
    if (diff) {
        Projectq::apply_controlled_gate(MCast<T>(gate.param_diff_matrix_(theta).matrix_), VCast(gate.obj_qubits_),
                                        VCast(gate.ctrl_qubits_));
    } else {
        Projectq::apply_controlled_gate(MCast<T>(gate.param_matrix_(theta).matrix_), VCast(gate.obj_qubits_),
                                        VCast(gate.ctrl_qubits_));
    }
}
// Projective measurement of the single qubit targeted by `gate`.
// Flushes pending gates, computes P(|0>) for that qubit, samples the
// outcome with rng_, collapses and renormalizes the state in place, and
// returns the measured bit (0 or 1).
unsigned ApplyMeasure(const BasicGate<T> &gate) {
    run(); // flush fused/pending operations so vec_ is up to date
    auto qubit = gate.obj_qubits_[0];
    auto mask = (1UL << qubit);
    calc_type zero_amps = 0;
    // Probability of measuring |0>: sum |amp|^2 over basis states whose
    // `qubit` bit is clear (vec_ stores interleaved real/imag parts).
    // #pragma omp parallel for schedule(static) reduction(+ : zero_amps)
    for (unsigned i = 0; i < (len_ >> 1); i++) {
        if ((i & mask) == 0) {
            zero_amps += vec_[2 * i] * vec_[2 * i] + vec_[2 * i + 1] * vec_[2 * i + 1];
        }
    }
    // Sample the outcome: collapse == 0 means the qubit measured 0.
    unsigned collapse = (static_cast<unsigned>(rng_() > zero_amps) << qubit);
    auto norm = (collapse == 0) ? sqrt(zero_amps) : sqrt(1 - zero_amps);
    // Zero the amplitudes inconsistent with the outcome; renormalize the rest.
#pragma omp parallel for schedule(static)
    for (unsigned i = 0; i < (len_ >> 1); i++) {
        if ((i & mask) == collapse) {
            vec_[2 * i] /= norm;
            vec_[2 * i + 1] /= norm;
        } else {
            vec_[2 * i] = 0;
            vec_[2 * i + 1] = 0;
        }
    }
    return (collapse >> qubit);
}
void ApplyCircuit(const VT<BasicGate<T>> &circ) {
for (auto &gate : circ) {
Projectq::ApplyGate(gate);
}
Projectq::run();
}
void ApplyCircuit(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr) {
for (auto &gate : circ) {
if (gate.parameterized_) {
Projectq::ApplyGate(gate, pr);
} else {
Projectq::ApplyGate(gate);
}
}
Projectq::run();
}
// Draws `shots` samples of the measurement gates in `circ`.
// Each shot replays the circuit on a fresh simulator seeded from a local
// RNG and initialized with a copy of the current state, so this
// simulator's own state is left unmodified. Returns a flat, shot-major
// vector of shots * key_map.size() measured bits.
VT<unsigned> Sampling(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr, size_t shots,
                      const MST<size_t> &key_map, unsigned seed) {
    auto key_size = key_map.size();
    VT<unsigned> res(shots * key_size);
    // Per-shot seeds in [1, 2^20): reproducible for a fixed top-level seed.
    RndEngine rnd_eng = RndEngine(seed);
    std::uniform_real_distribution<double> dist(1.0, (1 << 20) * 1.0);
    std::function<double()> rng = std::bind(dist, std::ref(rnd_eng));
    for (size_t i = 0; i < shots; i++) {
        Projectq<T> sim = Projectq<T>(static_cast<unsigned>(rng()), n_qubits_, vec_);
        auto res0 = sim.ApplyCircuitWithMeasure(circ, pr, key_map);
        for (size_t j = 0; j < key_size; j++) {
            res[i * key_size + j] = res0[j];
        }
    }
    return res;
}
// Runs `circ` once, collapsing the state at every measurement gate.
// The measured bit of gate g lands at res[key_map.at(g.name_)];
// parameterized gates are resolved through `pr`. Mutates this simulator's
// state (measurement collapses it).
VT<unsigned> ApplyCircuitWithMeasure(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr,
                                     const MST<size_t> &key_map) {
    auto key_size = key_map.size();
    VT<unsigned> res(key_size);
    for (auto &gate : circ) {
        if (gate.is_measure_) {
            auto collapse = ApplyMeasure(gate);
            res[key_map.at(gate.name_)] = collapse;
        } else if (gate.parameterized_) {
            ApplyGate(gate, pr);
        } else {
            ApplyGate(gate);
        }
    }
    return res;
}
// Replaces the current state |psi> by H|psi>, choosing the application
// method encoded in ham.how_to_. The result is in general unnormalized —
// callers use it for inner products, not sampling.
void ApplyHamiltonian(const Hamiltonian<T> &ham) {
    Projectq::run(); // flush pending gates before touching vec_ directly
    if (ham.how_to_ == ORIGIN) {
        // Term-by-term application of the qubit operator.
        Projectq::apply_qubit_operator(HCast<T>(ham.ham_), Projectq::ordering_);
    } else if (ham.how_to_ == BACKEND) {
        // Sparse CSR matrix-vector product (two-factor form).
        // NOTE(review): vec_ is reassigned without freeing the previous
        // buffer — presumably Csr_Dot_Vec takes ownership / frees it;
        // TODO confirm this does not leak.
        Projectq::vec_ = sparse::Csr_Dot_Vec<T, double>(ham.ham_sparse_main_, ham.ham_sparse_second_,
                                                        Projectq::vec_);
    } else {
        // Single-factor sparse CSR matrix-vector product.
        Projectq::vec_ = sparse::Csr_Dot_Vec<T, double>(ham.ham_sparse_main_, Projectq::vec_);
    }
}
// Adjoint-style gradient kernel. Given bra state `left_vec`, ket state
// `right_vec`, an observable and a circuit with its hermitian conjugate,
// returns f_g where f_g[0] = <left|H|right> and
// f_g[1 + p_map[param]] = the (unconjugated) gradient contribution of
// each trainable parameter. Walks herm_circ forward (i.e. the original
// circuit backward) on both sides, inserting each gate's derivative
// matrix on a scratch copy to pick up its parameter contributions.
VT<CT<T>> RightSizeGrad(calc_type *left_vec, calc_type *right_vec, const Hamiltonian<T> &ham,
                        const VT<BasicGate<T>> &circ, const VT<BasicGate<T>> &herm_circ,
                        const ParameterResolver<T> &pr, const MST<size_t> &p_map) {
    VT<CT<T>> f_g(p_map.size() + 1, 0);
    // Bra side: H applied to the left state.
    Projectq<T> sim_left = Projectq<T>(1, n_qubits_, left_vec);
    sim_left.ApplyHamiltonian(ham);
    // Slot 0 carries the expectation/overlap itself.
    f_g[0] = ComplexInnerProduct<T, calc_type>(sim_left.vec_, right_vec, static_cast<Index>(len_));
    Projectq<T> sim_right = Projectq<T>(1, n_qubits_, right_vec);
    Projectq<T> sim_right_tmp = Projectq<T>(1, n_qubits_);
    for (size_t j = 0; j < circ.size(); j++) {
        if ((!herm_circ[j].parameterized_) || (herm_circ[j].params_.requires_grad_parameters_.size() == 0)) {
            // No gradient wanted for this gate: just undo it on both sides.
            if (herm_circ[j].parameterized_) {
                sim_left.ApplyGate(herm_circ[j], pr, false);
                sim_right.ApplyGate(herm_circ[j], pr, false);
            } else {
                sim_left.ApplyGate(herm_circ[j]);
                sim_right.ApplyGate(herm_circ[j]);
            }
        } else {
            sim_right.ApplyGate(herm_circ[j], pr, false);
            sim_right.run();
            // Apply the derivative of the matching forward gate on a scratch
            // copy of the right state (circ index mirrors herm_circ index).
            sim_right_tmp.set_wavefunction(sim_right.vec_, ordering_);
            sim_right_tmp.ApplyGate(circ[circ.size() - j - 1], pr, true);
            sim_right_tmp.run();
            sim_left.run();
            CT<T> gi = 0;
            if (herm_circ[j].ctrl_qubits_.size() == 0) {
                gi = ComplexInnerProduct<T, calc_type>(sim_left.vec_, sim_right_tmp.vec_, static_cast<Index>(len_));
            } else {
                // Controlled gate: restrict the inner product to the control subspace.
                gi = ComplexInnerProductWithControl<T, calc_type>(sim_left.vec_, sim_right_tmp.vec_,
                                                                  static_cast<Index>(len_),
                                                                  GetControlMask(herm_circ[j].ctrl_qubits_));
            }
            // Chain rule: weight gi by each parameter's coefficient in the gate.
            for (auto &it : herm_circ[j].params_.requires_grad_parameters_) {
                f_g[1 + p_map.at(it)] += circ[circ.size() - j - 1].params_.data_.at(it) * gi;
            }
            sim_left.ApplyGate(herm_circ[j], pr, false);
        }
    }
    return f_g;
}
CT<T> GetExpectation(const Hamiltonian<T> &ham) {
    // <psi|H|psi> for the current state: apply H to a copy of the state,
    // then take the inner product of H|psi> with |psi>.
    Projectq<T> ham_sim = Projectq<T>(1, n_qubits_, vec_);
    ham_sim.ApplyHamiltonian(ham);
    return ComplexInnerProduct<T, calc_type>(ham_sim.vec_, vec_, static_cast<Index>(len_));
}
// Expectation values and parameter gradients of several hamiltonians for
// the current state evolved through `circ`. output[i][0] is <H_i>;
// output[i][1 + p_map[param]] is d<H_i>/d(param). For hermitian
// observables the gradient is g + conj(g), hence the in-loop conjugate
// add. With more than one hamiltonian, work is spread over mea_threads
// std::threads, each writing a disjoint slice of `output`.
VT<VT<CT<T>>> HermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<BasicGate<T>> &circ,
                                       const VT<BasicGate<T>> &herm_circ, const ParameterResolver<T> &pr,
                                       const MST<size_t> &p_map, size_t mea_threads) {
    auto n_hams = hams.size();
    auto n_params = pr.data_.size();
    VT<VT<CT<T>>> output;
    for (size_t i = 0; i < n_hams; i++) {
        output.push_back({});
        for (size_t j = 0; j < n_params + 1; j++) {
            output[i].push_back({0, 0});
        }
    }
    // Evolve a copy of the current state through the circuit once; the
    // gradient kernel then reuses its state vector for every hamiltonian.
    Projectq<T> sim = Projectq<T>(1, n_qubits_, vec_);
    sim.ApplyCircuit(circ, pr);
    if (n_hams == 1) {
        auto f_g = sim.RightSizeGrad(sim.vec_, sim.vec_, hams[0], circ, herm_circ, pr, p_map);
        for (size_t g = 1; g < n_params + 1; g++) {
            f_g[g] += std::conj(f_g[g]); // hermitian observable: grad = g + conj(g)
        }
        output[0] = f_g;
    } else {
        std::vector<std::thread> tasks;
        tasks.reserve(mea_threads);
        // Split [0, n_hams) into mea_threads nearly equal contiguous chunks.
        size_t end = 0;
        size_t offset = n_hams / mea_threads;
        size_t left = n_hams % mea_threads;
        for (size_t i = 0; i < mea_threads; ++i) {
            size_t start = end;
            end = start + offset;
            if (i < left) {
                end += 1; // spread the remainder over the first `left` threads
            }
            auto task = [&, start, end]() {
                for (size_t n = start; n < end; n++) {
                    // NOTE(review): all threads read sim.vec_ concurrently —
                    // presumably RightSizeGrad only reads it; TODO confirm.
                    auto f_g = sim.RightSizeGrad(sim.vec_, sim.vec_, hams[n], circ, herm_circ, pr, p_map);
                    for (size_t g = 1; g < n_params + 1; g++) {
                        f_g[g] += std::conj(f_g[g]);
                    }
                    output[n] = f_g; // disjoint index per hamiltonian: no race
                }
            };
            tasks.emplace_back(task);
        }
        for (auto &t : tasks) {
            t.join();
        }
    }
    return output;
}
// Batched variant: one evaluation per encoder-parameter row in enc_data,
// all sharing the same ansatz parameters. Gradient slots are laid out
// encoder parameters first, then ansatz parameters. Rows are distributed
// over batch_threads std::threads; each row then uses mea_threads for its
// hamiltonians (see the single-ParameterResolver overload).
VT<VT<VT<CT<T>>>> HermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<BasicGate<T>> &circ,
                                           const VT<BasicGate<T>> &herm_circ, const VVT<T> &enc_data,
                                           const VT<T> &ans_data, const VS &enc_name, const VS &ans_name,
                                           size_t batch_threads, size_t mea_threads) {
    auto n_hams = hams.size();
    auto n_prs = enc_data.size();
    auto n_params = enc_name.size() + ans_name.size();
    // Pre-size output[row][ham][param_slot] so threads write disjoint rows.
    VT<VT<VT<CT<T>>>> output;
    for (size_t i = 0; i < n_prs; i++) {
        output.push_back({});
        for (size_t j = 0; j < n_hams; j++) {
            output[i].push_back({});
            for (size_t k = 0; k < n_params + 1; k++) {
                output[i][j].push_back({0, 0});
            }
        }
    }
    // Map parameter names to gradient slots: encoder params first.
    MST<size_t> p_map;
    for (size_t i = 0; i < enc_name.size(); i++) {
        p_map[enc_name[i]] = i;
    }
    for (size_t i = 0; i < ans_name.size(); i++) {
        p_map[ans_name[i]] = i + enc_name.size();
    }
    if (n_prs == 1) {
        ParameterResolver<T> pr = ParameterResolver<T>();
        pr.SetData(enc_data[0], enc_name);
        pr.SetData(ans_data, ans_name);
        output[0] = HermitianMeasureWithGrad(hams, circ, herm_circ, pr, p_map, mea_threads);
    } else {
        std::vector<std::thread> tasks;
        tasks.reserve(batch_threads);
        // Split [0, n_prs) into batch_threads nearly equal contiguous chunks.
        size_t end = 0;
        size_t offset = n_prs / batch_threads;
        size_t left = n_prs % batch_threads;
        for (size_t i = 0; i < batch_threads; ++i) {
            size_t start = end;
            end = start + offset;
            if (i < left) {
                end += 1; // spread the remainder over the first `left` threads
            }
            auto task = [&, start, end]() {
                for (size_t n = start; n < end; n++) {
                    ParameterResolver<T> pr = ParameterResolver<T>();
                    pr.SetData(enc_data[n], enc_name);
                    pr.SetData(ans_data, ans_name);
                    auto f_g = HermitianMeasureWithGrad(hams, circ, herm_circ, pr, p_map, mea_threads);
                    output[n] = f_g; // disjoint row per batch entry: no race
                }
            };
            tasks.emplace_back(task);
        }
        for (auto &t : tasks) {
            t.join();
        }
    }
    return output;
}
// Gradients for a non-hermitian objective of the form
// <varphi|U_left^+ H U_right|psi>: the current state evolves through
// right_circ, `varphi` through left_circ, and each parameter's total
// derivative combines the right-circuit gradient with the conjugated
// left-circuit gradient. output[i][0] is the value for hamiltonian i;
// output[i][1 + p_map[param]] its gradient. Multiple hamiltonians are
// spread over mea_threads std::threads.
VT<VT<CT<T>>> NonHermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<Hamiltonian<T>> &herm_hams,
                                          const VT<BasicGate<T>> &left_circ, const VT<BasicGate<T>> &herm_left_circ,
                                          const VT<BasicGate<T>> &right_circ,
                                          const VT<BasicGate<T>> &herm_right_circ, const ParameterResolver<T> &pr,
                                          const MST<size_t> &p_map, size_t mea_threads, const StateVector varphi) {
    auto n_hams = hams.size();
    auto n_params = pr.data_.size();
    VT<VT<CT<T>>> output;
    for (size_t i = 0; i < n_hams; i++) {
        output.push_back({});
        for (size_t j = 0; j < n_params + 1; j++) {
            output[i].push_back({0, 0});
        }
    }
    // Ket side: current state through the right circuit.
    Projectq<T> sim = Projectq<T>(1, n_qubits_, vec_);
    sim.ApplyCircuit(right_circ, pr);
    // Bra side: varphi through the left circuit.
    Projectq<T> sim2 = Projectq<T>(1, n_qubits_, varphi);
    sim2.ApplyCircuit(left_circ, pr);
    if (n_hams == 1) {
        auto f_g1 = sim2.RightSizeGrad(sim.vec_, sim2.vec_, herm_hams[0], left_circ, herm_left_circ, pr, p_map);
        auto f_g2 = sim.RightSizeGrad(sim2.vec_, sim.vec_, hams[0], right_circ, herm_right_circ, pr, p_map);
        for (size_t g = 1; g < n_params + 1; g++) {
            f_g2[g] += std::conj(f_g1[g]); // combine right grad with conjugated left grad
        }
        output[0] = f_g2;
    } else {
        std::vector<std::thread> tasks;
        tasks.reserve(mea_threads);
        // Split [0, n_hams) into mea_threads nearly equal contiguous chunks.
        size_t end = 0;
        size_t offset = n_hams / mea_threads;
        size_t left = n_hams % mea_threads;
        for (size_t i = 0; i < mea_threads; i++) {
            size_t start = end;
            end = start + offset;
            if (i < left) {
                end += 1; // spread the remainder over the first `left` threads
            }
            auto task = [&, start, end]() {
                for (size_t n = start; n < end; n++) {
                    auto f_g1 = sim2.RightSizeGrad(sim.vec_, sim2.vec_, herm_hams[n], left_circ, herm_left_circ, pr,
                                                   p_map);
                    auto f_g2 = sim.RightSizeGrad(sim2.vec_, sim.vec_, hams[n], right_circ, herm_right_circ, pr,
                                                  p_map);
                    for (size_t g = 1; g < n_params + 1; g++) {
                        f_g2[g] += std::conj(f_g1[g]);
                    }
                    output[n] = f_g2; // disjoint index per hamiltonian: no race
                }
            };
            tasks.emplace_back(task);
        }
        for (auto &t : tasks) {
            t.join();
        }
    }
    return output;
}
// Batched variant of the non-hermitian gradient: one evaluation per
// encoder-parameter row in enc_data (shared ansatz parameters), with the
// left state taken from `simulator_left`'s vector. Gradient slots are
// encoder parameters first, then ansatz. Rows are distributed over
// batch_threads std::threads; each row uses mea_threads internally.
VT<VT<VT<CT<T>>>> NonHermitianMeasureWithGrad(
    const VT<Hamiltonian<T>> &hams, const VT<Hamiltonian<T>> &herm_hams, const VT<BasicGate<T>> &left_circ,
    const VT<BasicGate<T>> &herm_left_circ, const VT<BasicGate<T>> &right_circ,
    const VT<BasicGate<T>> &herm_right_circ, const VVT<T> &enc_data, const VT<T> &ans_data, const VS &enc_name,
    const VS &ans_name, size_t batch_threads, size_t mea_threads, const Projectq<T> &simulator_left) {
    // Raw left-state buffer, shared (read) by every batch entry.
    StateVector varphi = simulator_left.vec_;
    auto n_hams = hams.size();
    auto n_prs = enc_data.size();
    auto n_params = enc_name.size() + ans_name.size();
    // Pre-size output[row][ham][param_slot] so threads write disjoint rows.
    VT<VT<VT<CT<T>>>> output;
    for (size_t i = 0; i < n_prs; i++) {
        output.push_back({});
        for (size_t j = 0; j < n_hams; j++) {
            output[i].push_back({});
            for (size_t k = 0; k < n_params + 1; k++) {
                output[i][j].push_back({0, 0});
            }
        }
    }
    // Map parameter names to gradient slots: encoder params first.
    MST<size_t> p_map;
    for (size_t i = 0; i < enc_name.size(); i++) {
        p_map[enc_name[i]] = i;
    }
    for (size_t i = 0; i < ans_name.size(); i++) {
        p_map[ans_name[i]] = i + enc_name.size();
    }
    if (n_prs == 1) {
        ParameterResolver<T> pr = ParameterResolver<T>();
        pr.SetData(enc_data[0], enc_name);
        pr.SetData(ans_data, ans_name);
        output[0] = NonHermitianMeasureWithGrad(hams, herm_hams, left_circ, herm_left_circ, right_circ,
                                                herm_right_circ, pr, p_map, mea_threads, varphi);
    } else {
        std::vector<std::thread> tasks;
        tasks.reserve(batch_threads);
        // Split [0, n_prs) into batch_threads nearly equal contiguous chunks.
        size_t end = 0;
        size_t offset = n_prs / batch_threads;
        size_t left = n_prs % batch_threads;
        for (size_t i = 0; i < batch_threads; ++i) {
            size_t start = end;
            end = start + offset;
            if (i < left) {
                end += 1; // spread the remainder over the first `left` threads
            }
            auto task = [&, start, end]() {
                for (size_t n = start; n < end; n++) {
                    ParameterResolver<T> pr = ParameterResolver<T>();
                    pr.SetData(enc_data[n], enc_name);
                    pr.SetData(ans_data, ans_name);
                    auto f_g = NonHermitianMeasureWithGrad(hams, herm_hams, left_circ, herm_left_circ, right_circ,
                                                           herm_right_circ, pr, p_map, mea_threads, varphi);
                    output[n] = f_g; // disjoint row per batch entry: no race
                }
            };
            tasks.emplace_back(task);
        }
        for (auto &t : tasks) {
            t.join();
        }
    }
    return output;
}
//! Dump the current state vector to stdout, one "(re, im)" amplitude per line.
//! vec_ stores interleaved real/imaginary parts, so len_ >> 1 amplitudes exist.
void PrintInfo() const {
    std::cout << n_qubits_ << " qubits simulator with currently quantum state at:" << std::endl;
    const auto n_amplitudes = len_ >> 1;
    for (unsigned idx = 0; idx != n_amplitudes; ++idx) {
        const auto re = vec_[2 * idx];
        const auto im = vec_[2 * idx + 1];
        std::cout << "(" << re << ", " << im << ")" << std::endl;
    }
}
// Return the full (2^n x 2^n) matrix of a parameterized circuit by applying
// it to every computational basis state in parallel.
//
// For each basis index i a fresh simulator is created, the unit amplitude is
// moved from |0...0> to |i>, the circuit is applied, and the resulting state
// is recorded via cheat().
//
// FIX: the output was previously sized with `1 << n_qubits_`, an int shift
// that overflows (undefined behavior) once n_qubits_ >= 31, while the loop
// bound used `1UL`. The dimension is now computed once as a size_t and used
// consistently.
VVT<CT<calc_type>> GetCircuitMatrix(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr) {
    const size_t dim = static_cast<size_t>(1) << n_qubits_;
    VVT<CT<calc_type>> out(dim);
#pragma omp parallel for schedule(static)
    for (size_t i = 0; i < dim; i++) {
        Projectq<T> sim = Projectq<T>(0, n_qubits_);
        // vec_ holds interleaved (real, imag) pairs: clear the |0...0>
        // amplitude, then give basis state |i> amplitude 1.
        sim.vec_[0] = 0;
        sim.vec_[2 * i] = 1;
        sim.ApplyCircuit(circ, pr);
        out[i] = sim.cheat();
    }
    return out;
}
};
} // namespace projectq
} // namespace mindquantum
#endif // MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_
|
fig3.10-mxv-omp.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
void mxv(int m, int n, double * restrict a,
double * restrict b, double * restrict c);
/*
 * Driver: read m and n, build an m x n matrix b (row i filled with the value
 * i) and a vector c of all 2.0, then compute a = b * c via mxv().
 *
 * Fixes over the original: the result of scanf() is now validated, and a
 * failed malloc() aborts instead of falling through and dereferencing a null
 * pointer inside mxv().
 */
int main(int argc, char *argv[])
{
   double *a,*b,*c;
   int i, j, m, n;

   printf("Please give m and n: ");
   if ( scanf("%d %d",&m,&n) != 2 || m <= 0 || n <= 0 )
   {
      fprintf(stderr,"Invalid input: expected two positive integers\n");
      exit(EXIT_FAILURE);
   }
   printf("\n");

   /* a: result (length m); b: m x n matrix, row-major; c: input (length n) */
   if ( (a=(double *)malloc(m*sizeof(double))) == NULL )
     { perror("memory allocation for a"); exit(EXIT_FAILURE); }
   if ( (b=(double *)malloc(m*n*sizeof(double))) == NULL )
     { perror("memory allocation for b"); exit(EXIT_FAILURE); }
   if ( (c=(double *)malloc(n*sizeof(double))) == NULL )
     { perror("memory allocation for c"); exit(EXIT_FAILURE); }

   printf("Initializing matrix B and vector c\n");
   for (j=0; j<n; j++)
       c[j] = 2.0;
   for (i=0; i<m; i++)
       for (j=0; j<n; j++)
           b[i*n+j] = i;

   printf("Executing mxv function for m = %d n = %d\n",m,n);
   (void) mxv(m, n, a, b, c);

   free(a);free(b);free(c);
   return(0);
}
/*
 * Matrix-times-vector: a = B * c, where B is an m x n matrix stored
 * row-major in b. Rows are distributed over the OpenMP threads; each
 * row's dot product is independent of the others.
 */
void mxv(int m, int n, double * restrict a,
         double * restrict b, double * restrict c)
{
   int row, col;

   #pragma omp parallel for default(none) \
           shared(m,n,a,b,c) private(row,col)
   for (row=0; row<m; row++)
   {
      double sum = 0.0;          /* per-iteration accumulator, private */
      for (col=0; col<n; col++)
         sum += b[row*n+col]*c[col];
      a[row] = sum;
   } /*-- End of omp parallel for --*/
}
|
pxgstrf_synch.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*
* -- SuperLU MT routine (version 2.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 15, 1997
*
* Modified: March 20, 2013 version 2.1
*/
#include <stdio.h>
#include <math.h>
#include "slu_mt_ddefs.h"
#define SPLIT_TOP
/*
 * Set up the shared state used by the parallel factorization:
 *   - platform-specific mutexes guarding the global LU data structures;
 *   - per-column spin locks and per-panel status records;
 *   - the task queue, seeded with the relaxed supernodes (or domains);
 *   - the partition of the n columns into panels, honoring relaxed
 *     snodes and branch points of the elimination tree.
 * Returns 0 on success; aborts via SUPERLU_ABORT if the task queue
 * cannot be created.
 */
int_t
ParallelInit(int_t n, pxgstrf_relax_t *pxgstrf_relax,
superlumt_options_t *superlumt_options,
pxgstrf_shared_t *pxgstrf_shared)
{
int_t *etree = superlumt_options->etree;
register int_t w, dad, ukids, i, j, k, rs, panel_size, relax;
register int_t P, w_top, do_split = 0;
panel_t panel_type;
int_t *panel_histo = pxgstrf_shared->Gstat->panel_histo;
register int_t nthr, concurrency, info;
Gstat_t *Gstat = pxgstrf_shared->Gstat;
#if ( MACH==SUN )
register int_t sync_type = USYNC_THREAD;
/* Set concurrency level. */
nthr = sysconf(_SC_NPROCESSORS_ONLN);
thr_setconcurrency(nthr); /* number of LWPs */
concurrency = thr_getconcurrency();
#if ( PRNTlevel==1 )
/* NOTE(review): P is printed here but only assigned further below
   ("P = 12"), so on SUN builds with PRNTlevel==1 this reads an
   uninitialized value -- confirm and reorder if SUN support matters. */
printf(".. CPUs " IFMT ", concurrency (#LWP) " IFMT ", P " IFMT "\n",
nthr, concurrency, P);
#endif
/* Initialize mutex variables. */
pxgstrf_shared->lu_locks = (mutex_t *)
SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(mutex_t));
for (i = 0; i < NO_GLU_LOCKS; ++i)
mutex_init(&pxgstrf_shared->lu_locks[i], sync_type, 0);
#elif ( MACH==DEC || MACH==PTHREAD )
pxgstrf_shared->lu_locks = (pthread_mutex_t *)
SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(pthread_mutex_t));
for (i = 0; i < NO_GLU_LOCKS; ++i)
pthread_mutex_init(&pxgstrf_shared->lu_locks[i], NULL);
#else
/* Other platforms (SGI/CRAY/OPENMP): locking is done via compiler
   pragmas elsewhere, so the lock array is only allocated, not
   initialized here. */
pxgstrf_shared->lu_locks =
(mutex_t *) SUPERLU_MALLOC( NO_GLU_LOCKS * sizeof(mutex_t) );
#endif
#if ( PRNTlevel==1 )
printf(".. ParallelInit() ... nprocs %2d\n", superlumt_options->nprocs);
#endif
/* One spin lock per column, one status record per column plus a
   dummy root at index n. */
pxgstrf_shared->spin_locks = intCalloc(n);
pxgstrf_shared->pan_status =
(pan_status_t *) SUPERLU_MALLOC((n+1)*sizeof(pan_status_t));
pxgstrf_shared->fb_cols = intMalloc(n+1);
panel_size = superlumt_options->panel_size;
relax = superlumt_options->relax;
/* Panel widths can be at most max(panel_size, relax); clear histogram. */
w = SUPERLU_MAX(panel_size, relax) + 1;
for (i = 0; i < w; ++i) panel_histo[i] = 0;
pxgstrf_shared->num_splits = 0;
if ( (info = queue_init(&pxgstrf_shared->taskq, n)) ) {
fprintf(stderr, "ParallelInit(): " IFMT "\n", info);
SUPERLU_ABORT("queue_init fails.");
}
/* Count children of each node in the etree. */
for (i = 0; i <= n; ++i) pxgstrf_shared->pan_status[i].ukids = 0;
for (i = 0; i < n; ++i) {
dad = etree[i];
++pxgstrf_shared->pan_status[dad].ukids;
}
/* Find the panel partitions and initialize each panel's status */
#ifdef PROFILE
Gstat->num_panels = 0;
#endif
pxgstrf_shared->tasks_remain = 0;
rs = 1; /* index for the next relaxed s-node */
w_top = panel_size/2;
if ( w_top == 0 ) w_top = 1;
/* Heuristic: start splitting large panels near the top of the etree,
   once fewer than panel_size * P columns remain. */
P = 12;
/* Walk the columns left to right, carving them into panels. */
for (i = 0; i < n; ) {
if ( pxgstrf_relax[rs].fcol == i ) {
/* Column i starts a relaxed supernode: it is schedulable at once. */
w = pxgstrf_relax[rs++].size;
panel_type = RELAXED_SNODE;
pxgstrf_shared->pan_status[i].state = CANGO;
} else {
/* Adjust panel_size so that a panel won't overlap with
the next relaxed snode. */
#if 0
/* Only works when etree is postordered. */
w = SUPERLU_MIN(panel_size, pxgstrf_relax[rs].fcol - i);
#else
w = panel_size;
for (k = i + 1; k < SUPERLU_MIN(i + panel_size, n); ++k)
if ( k == pxgstrf_relax[rs].fcol ) {
w = k - i; /* panel stops at column k-1 */
break;
}
if ( k == n ) w = n - i;
#endif
#ifdef SPLIT_TOP
if ( !do_split ) {
if ( (n-i) < panel_size * P ) do_split = 1;
}
if ( do_split && w > w_top ) { /* split large panel */
w = w_top;
++pxgstrf_shared->num_splits;
}
#endif
for (j = i+1; j < i + w; ++j)
/* Do not allow panel to cross a branch point in the etree. */
if ( pxgstrf_shared->pan_status[j].ukids > 1 ) break;
w = j - i; /* j should start a new panel */
panel_type = REGULAR_PANEL;
pxgstrf_shared->pan_status[i].state = UNREADY;
#ifdef DOMAINS
if ( in_domain[i] == TREE_DOMAIN ) panel_type = TREE_DOMAIN;
#endif
}
if ( panel_type == REGULAR_PANEL ) {
++pxgstrf_shared->tasks_remain;
/*printf("nondomain panel %6d -- %6d\n", i, i+w-1);
fflush(stdout);*/
}
/* Interior columns j of the panel get negative size values -1, -2, ...
   (distance back to the leading column); the leading column i gets
   the true width w below. */
ukids = k = 0;
for (j = i; j < i + w; ++j) {
pxgstrf_shared->pan_status[j].size = k--;
pxgstrf_shared->pan_status[j].type = panel_type;
ukids += pxgstrf_shared->pan_status[j].ukids;
}
pxgstrf_shared->pan_status[i].size = w; /* leading column */
/* only count those kids outside the panel */
pxgstrf_shared->pan_status[i].ukids = ukids - (w-1);
panel_histo[w]++;
#ifdef PROFILE
Gstat->panstat[i].size = w;
++Gstat->num_panels;
#endif
pxgstrf_shared->fb_cols[i] = i;
i += w; /* move to the next panel */
} /* for i ... */
/* Dummy root */
pxgstrf_shared->pan_status[n].size = 1;
pxgstrf_shared->pan_status[n].state = UNREADY;
#if ( PRNTlevel==1 )
printf(".. Split: P " IFMT ", #nondomain panels " IFMT "\n", P, pxgstrf_shared->tasks_remain);
#endif
/* Seed the task queue with the initially schedulable work. */
#ifdef DOMAINS
EnqueueDomains(&pxgstrf_shared->taskq, list_head, pxgstrf_shared);
#else
EnqueueRelaxSnode(&pxgstrf_shared->taskq, n, pxgstrf_relax, pxgstrf_shared);
#endif
#if ( PRNTlevel==1 )
printf(".. # tasks " IFMT "\n", pxgstrf_shared->tasks_remain);
fflush(stdout);
#endif
#ifdef PREDICT_OPT
/* Set up structure describing children */
for (i = 0; i <= n; cp_firstkid[i++] = EMPTY);
for (i = n-1; i >= 0; i--) {
dad = etree[i];
cp_nextkid[i] = cp_firstkid[dad];
cp_firstkid[dad] = i;
}
#endif
return 0;
} /* ParallelInit */
/*
* Free the storage used by the parallel scheduling algorithm.
*/
/*
 * Release everything ParallelInit() (and the factorization) allocated in
 * the shared structure: mutexes, spin locks, panel status, first-busy
 * columns, the supernode map, and the task queue. Returns 0.
 */
int_t ParallelFinalize(pxgstrf_shared_t *pxgstrf_shared)
{
/* Destroy mutexes */
#if ( MACH==SUN )
register int_t i;
for (i = 0; i < NO_GLU_LOCKS; ++i)
mutex_destroy( &pxgstrf_shared->lu_locks[i] );
#elif ( MACH==DEC || MACH==PTHREAD )
register int_t i;
for (i = 0; i < NO_GLU_LOCKS; ++i)
pthread_mutex_destroy( &pxgstrf_shared->lu_locks[i] );
#endif
/* On pragma-based platforms the lock array needs no per-lock teardown;
   the storage is freed unconditionally below. */
SUPERLU_FREE ((void*)pxgstrf_shared->lu_locks);
SUPERLU_FREE ((int_t*)pxgstrf_shared->spin_locks);
SUPERLU_FREE (pxgstrf_shared->pan_status);
SUPERLU_FREE (pxgstrf_shared->fb_cols);
SUPERLU_FREE (pxgstrf_shared->Glu->map_in_sup);
queue_destroy(&pxgstrf_shared->taskq);
#if ( PRNTlevel==1 )
printf(".. # panel splittings " IFMT "\n", pxgstrf_shared->num_splits);
#endif
return 0;
}
/*
 * Initialize an empty task queue with room for n items.
 * Returns 0 on success, -1 on a non-positive capacity or a failed
 * allocation (the original ignored the SUPERLU_MALLOC result, so an
 * out-of-memory condition surfaced later as a null dereference).
 */
int_t queue_init(queue_t *q, int_t n)
{
    if ( n < 1 ) return (-1);
    q->queue = (qitem_t *) SUPERLU_MALLOC(n*sizeof(qitem_t));
    if ( q->queue == NULL ) return (-1);   /* allocation failed */
    q->count = 0;
    q->head = 0;
    q->tail = 0;
    return 0;
}
/*
 * Dispose of the item buffer owned by the task queue. The queue_t
 * structure itself belongs to the caller and is not freed here.
 */
int_t queue_destroy(queue_t *q)
{
    qitem_t *items = q->queue;
    SUPERLU_FREE( items );
    return 0;
}
/*
* Return value: number of items in the queue
*/
/*
 * Append `item` at the tail of the queue.
 * Return value: number of items in the queue after the insertion.
 * NOTE(review): there is no capacity check -- callers must guarantee the
 * queue was created large enough (see queue_init).
 */
int_t Enqueue(queue_t *q, qitem_t item)
{
    int_t slot = q->tail;
    q->tail = slot + 1;
    q->queue[slot] = item;
    q->count += 1;
    return q->count;
}
/*
* Return value: >= 0 number of items in the queue
* = -1 queue is empty
*/
/*
 * Remove the item at the head of the queue into *item.
 * Return value: >= 0 number of items left in the queue
 *               EMPTY when the queue had nothing to give
 */
int_t Dequeue(queue_t *q, qitem_t *item)
{
    if ( q->count <= 0 )
        return EMPTY;
    *item = q->queue[q->head];
    q->head += 1;
    q->count -= 1;
    return q->count;
}
/* Debug helper: print the queue count and every pending item (head..tail-1). */
int_t QueryQueue(queue_t *q)
{
    int_t pos;

    printf("Queue count: " IFMT "\n", q->count);
    for (pos = q->head; pos < q->tail; ++pos) {
        printf(IFMT "\titem " IFMT "\n", pos, q->queue[pos]);
    }
    return 0;
}
/*
 * Seed the task queue with the first column of every relaxed supernode,
 * which is immediately schedulable, and bump tasks_remain accordingly.
 * Convention: pxgstrf_relax[0].size holds the number of relaxed snodes,
 * and the snodes themselves occupy entries 1..m (consistent with the
 * rs = 1 starting index used in ParallelInit).
 */
int_t EnqueueRelaxSnode(queue_t *q, int_t n, pxgstrf_relax_t *pxgstrf_relax,
pxgstrf_shared_t *pxgstrf_shared)
{
register int_t rs, j, m;
m = pxgstrf_relax[0].size; /* number of relaxed snodes, not a width */
for (rs = 1; rs <= m; ++rs) {
j = pxgstrf_relax[rs].fcol; /* leading column of this snode */
q->queue[q->tail++] = j;
q->count++;
++pxgstrf_shared->tasks_remain;
}
#if ( PRNTlevel==1 )
printf(".. EnqueueRelaxSnode(): count " IFMT "\n", q->count);
#endif
return 0;
}
/*
* Enqueue the initial independent domains.
* A pair of two numbers {root, fst_desc} is added in the queue.
*/
/*int EnqueueDomains(int P, queue_t *q, struct Branch **proc_domains_h)*/
/*
 * Seed the task queue with the initial independent domains: for each list
 * node a pair {root, first_desc} is pushed as two consecutive queue items,
 * the root panel is marked CANGO, and tasks_remain is incremented.
 * The list nodes are consumed (freed via `thrash`) as the walk proceeds,
 * so the caller must not touch list_head afterwards.
 */
/*int EnqueueDomains(int P, queue_t *q, struct Branch **proc_domains_h)*/
int_t EnqueueDomains(queue_t *q, struct Branch *list_head,
pxgstrf_shared_t *pxgstrf_shared)
{
struct Branch *b, *thrash;
/* for (pnum = 0; pnum < P; ++pnum) {
for (b = proc_domains_h[pnum]; b != NULL; ) {*/
b = list_head;
while ( b ) {
thrash = b; /* remember the node so it can be freed after advancing */
q->queue[q->tail++] = b->root;
q->queue[q->tail++] = b->first_desc;
q->count = q->count + 2;
STATE ( b->root ) = CANGO;
++pxgstrf_shared->tasks_remain;
b = b->next;
SUPERLU_FREE (thrash);
}
printf("EnqueueDomains(): count " IFMT "\n", q->count);
return 0;
}
/*
 * Atomically increment the shared supernode counter (*data) and return the
 * new value. The update is protected by NSUPER_LOCK using whichever
 * primitive the platform provides; on SGI/ORIGIN, CRAY_PVP and OPENMP
 * builds the critical section comes from the compiler pragma and the
 * `lock` pointer itself is unused. Under PROFILE the time spent waiting
 * for the lock is charged to the calling processor pnum.
 */
int_t NewNsuper(const int_t pnum, pxgstrf_shared_t *pxgstrf_shared, int_t *data)
{
register int_t i;
mutex_t *lock = &pxgstrf_shared->lu_locks[NSUPER_LOCK];
Gstat_t *Gstat = pxgstrf_shared->Gstat;
#ifdef PROFILE
double t = SuperLU_timer_();
#endif
#if ( MACH==SUN )
mutex_lock(lock);
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_lock(lock);
#elif ( MACH==SGI || MACH==ORIGIN )
#pragma critical lock(lock)
#elif ( MACH==CRAY_PVP )
#pragma _CRI guard (*lock)
#elif ( MACH==OPENMP )
#pragma omp critical ( NSUPER_LOCK )
#endif
{
/* The guarded region: bump the counter and capture the new value. */
i = ++(*data);
}
#if ( MACH==SUN )
mutex_unlock(lock);
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_unlock(lock);
#elif ( MACH==CRAY_PVP )
#pragma _CRI endguard (*lock)
#endif
#ifdef PROFILE
Gstat->procstat[pnum].cs_time += SuperLU_timer_() - t;
#endif
return i;
}
/*
 * Spin until *block becomes 0, then claim it by writing 1.
 *
 * FIX: the load is now performed through a volatile-qualified pointer so
 * the compiler must re-read memory on every iteration; with a plain
 * int_t* it was entitled to hoist the load out of the loop and spin
 * forever once it cached a nonzero value.
 *
 * NOTE(review): the test and the set are still not one atomic operation,
 * so two threads can both observe 0 and both proceed -- the callers
 * appear to rely on higher-level scheduling to prevent that; confirm
 * before reusing this as a general-purpose lock.
 */
int_t lockon(int_t *block)
{
    while ( *(volatile int_t *) block ) ; /* spin-wait */
    *(volatile int_t *) block = 1;
    return 0;
}
/* Release a spin lock previously taken by lockon() by clearing its flag. */
int_t lockoff(int_t *block)
{
    block[0] = 0;
    return 0;
}
|
test10.c | int g1 = 10;
/*
 * Compiler/analysis test fixture. The bare integer-literal statements
 * (0;, 2;, ...) are program-point markers and the labels l1/l2 tag the
 * OpenMP barriers; none of it performs real work. Do not "clean up".
 */
void bar() {
0;
if (1) {
2;
l1:
#pragma omp barrier
3;
} else {
4;
l2:
#pragma omp barrier
5;
bar(); /* self-recursion through a region containing a barrier */
6;
}
7;
}
/*
 * Second test-fixture function, structurally mirroring bar() with its own
 * markers (8..15) and barrier labels l3/l4. Kept deliberately odd for the
 * analysis under test.
 */
void foo() {
8;
if (9) {
10;
l3:
#pragma omp barrier
11;
} else {
12;
l4:
#pragma omp barrier
13;
foo(); /* self-recursion through a region containing a barrier */
14;
}
15;
}
/*
 * Entry point of the fixture: a parallel region whose threads reach
 * barriers along different call paths (foo vs. bar) before meeting at
 * the common barrier labeled l5.
 */
int main() {
#pragma omp parallel
{
16;
if (17) {
18;
foo();
19;
} else {
20;
bar();
21;
}
22;
l5:
#pragma omp barrier
23;
}
}
|
diagsm_x_coo_n_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/*
 * Diagonal solve with multiple right-hand sides, column-major layout:
 *   y[:, c] = alpha * x[:, c] / diag(A)   for each of `columns` columns,
 * where only the diagonal entries of the COO matrix A are used.
 *
 * NOTE(review): `diag` is a VLA of A->rows elements on the stack -- a large
 * matrix can overflow the stack; consider heap allocation.
 * NOTE(review): rows with no stored diagonal entry keep diag[r] == 0 and
 * alpha_div then divides by zero -- presumably the caller guarantees a
 * complete diagonal; confirm.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_Number diag[A->rows];
memset(diag, '\0', A->rows * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
/* Gather the diagonal of A from the unordered COO triplets.
   NOTE(review): duplicate (r,r) entries are written concurrently here;
   if their values differ the result depends on thread scheduling. */
for (ALPHA_INT r = 0; r < A->nnz; r++)
{
if(A->row_indx[r] == A->col_indx[r])
{
diag[A->row_indx[r]] = A->values[r];
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
/* Columns (right-hand sides) are independent, so parallelize across them.
   x and y are addressed as column c, row r with leading dimensions ldx/ldy. */
for (ALPHA_INT c = 0; c < columns; ++c)
{
for (ALPHA_INT r = 0; r < A->rows; ++r)
{
ALPHA_Number t;
alpha_mul(t, alpha, x[index2(c, r, ldx)]);
alpha_div(y[index2(c, r, ldy)], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, set fuzz to 10 and the color red at intensities of
% 100 and 102 respectively are now interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
const ssize_t y_offset,const MagickBooleanType invert,
ExceptionInfo *exception)
{
/* Capacity of the scanline segment stack; exceeding it raises
   SegmentStackOverflow below. */
#define MaxStacksize 524288UL
/* Record a horizontal span [left,right] on row `up` whose neighbor row
   up+delta still has to be visited; spans whose neighbor row falls outside
   the image are silently dropped. `s` is the stack top pointer. */
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
Image
*floodplane_image;
MagickBooleanType
skip,
status;
MemoryInfo
*segment_info;
PixelInfo
fill_color,
pixel;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if ((image->alpha_trait == UndefinedPixelTrait) &&
(draw_info->fill.alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(image,OpaqueAlpha,exception);
/*
Set floodfill state.
*/
/* floodplane_image is a gray mask over the image: pixels still 0 are
   unvisited, pixels set to QuantumRange have been accepted by the fill. */
floodplane_image=CloneImage(image,0,0,MagickTrue,
exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
floodplane_image->alpha_trait=UndefinedPixelTrait;
floodplane_image->colorspace=GRAYColorspace;
(void) QueryColorCompliance("#000",AllCompliance,
&floodplane_image->background_color,exception);
(void) SetImageBackgroundColor(floodplane_image,exception);
segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
if (segment_info == (MemoryInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
/*
Push initial segment on stack.
*/
status=MagickTrue;
start=0;
s=segment_stack;
PushSegmentStack(y_offset,x_offset,x_offset,1);
PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
/* Phase 1: classic scanline flood fill -- pop a span, grow it left and
   right along its row, and push the unvisited spans of the rows above
   and below. */
while (s > segment_stack)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
/* Walk left from x1 while pixels are unvisited and match the target
   (or mismatch it, when invert is set). */
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
p+=x1*GetPixelChannels(image);
q+=x1*GetPixelChannels(floodplane_image);
for (x=x1; x >= 0; x--)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p-=GetPixelChannels(image);
q-=GetPixelChannels(floodplane_image);
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
/* skip is set when not even the seed pixel x1 was fillable. */
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
/* Scan rightwards across [x1+1, x2] (and beyond x2 while pixels keep
   matching), pushing child spans for the neighboring rows. */
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
x,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
if (status == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
/* Advance past the non-matching gap to the next fillable run inside
   [x, x2], if any, before looping again. */
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x <= x2; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
}
start=x;
} while (x <= x2);
}
/* Phase 2: apply the fill color to every pixel the mask accepted. */
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Tile fill color onto floodplane.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,p) != 0)
{
GetFillColor(draw_info,x,y,&fill_color,exception);
SetPixelViaPixelInfo(image,&fill_color,q);
}
p+=GetPixelChannels(floodplane_image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_info=RelinquishVirtualMemory(segment_info);
floodplane_image=DestroyImage(floodplane_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transistion.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelInfo *start_color,
% const PixelInfo *stop_color,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread meathod: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Build a DrawInfo gradient from the image's "gradient:*" artifacts
 * (direction, angle, vector, center, extent, radii), copy in the color
 * stops, and delegate the actual rendering to DrawGradientImage().
 *
 * Fixes over the original:
 *   - SouthGravity set gradient_vector.y2 from image->columns instead of
 *     image->rows (copy-paste bug; a south gradient runs down the rows,
 *     mirroring NorthGravity).
 *   - draw_info is no longer leaked when the stops allocation fails.
 */
MagickExport MagickBooleanType GradientImage(Image *image,
const GradientType type,const SpreadMethod method,const StopInfo *stops,
const size_t number_stops,ExceptionInfo *exception)
{
const char
*artifact;
DrawInfo
*draw_info;
GradientInfo
*gradient;
MagickBooleanType
status;
/*
Set gradient start-stop end points.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(stops != (const StopInfo *) NULL);
assert(number_stops > 0);
draw_info=AcquireDrawInfo();
gradient=(&draw_info->gradient);
gradient->type=type;
gradient->bounding_box.width=image->columns;
gradient->bounding_box.height=image->rows;
artifact=GetImageArtifact(image,"gradient:bounding-box");
if (artifact != (const char *) NULL)
(void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
/* Default vector: top-left corner to bottom-right corner. */
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
artifact=GetImageArtifact(image,"gradient:direction");
if (artifact != (const char *) NULL)
{
GravityType
direction;
direction=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,artifact);
/* Each gravity maps to a (x1,y1)->(x2,y2) vector pointing toward it. */
switch (direction)
{
case NorthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case WestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case EastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case SouthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
/* FIX: was image->columns-1; a vertical (south) gradient spans rows. */
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
default:
break;
}
}
artifact=GetImageArtifact(image,"gradient:angle");
if (artifact != (const char *) NULL)
gradient->angle=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"gradient:vector");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
&gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
&gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
/* With no explicit orientation, a linear gradient defaults to vertical. */
if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
gradient->gradient_vector.x2=0.0;
gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
artifact=GetImageArtifact(image,"gradient:center");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
&gradient->center.y);
artifact=GetImageArtifact(image,"gradient:angle");
if ((type == LinearGradient) && (artifact != (const char *) NULL))
{
double
sine,
cosine,
distance;
/*
Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
*/
sine=sin((double) DegreesToRadians(gradient->angle-90.0));
cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
distance=fabs((double) (image->columns-1.0)*cosine)+
fabs((double) (image->rows-1.0)*sine);
gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
}
gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
2.0;
gradient->radii.y=gradient->radii.x;
artifact=GetImageArtifact(image,"gradient:extent");
if (artifact != (const char *) NULL)
{
if (LocaleCompare(artifact,"Circle") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Diagonal") == 0)
{
gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
(image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Ellipse") == 0)
{
gradient->radii.x=(double) (image->columns-1.0)/2.0;
gradient->radii.y=(double) (image->rows-1.0)/2.0;
}
if (LocaleCompare(artifact,"Maximum") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Minimum") == 0)
{
gradient->radii.x=(double) (MagickMin((image->columns-1.0),
(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
}
artifact=GetImageArtifact(image,"gradient:radii");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
&gradient->radii.y);
gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
gradient->spread=method;
/*
Define the gradient to fill between the stops.
*/
gradient->number_stops=number_stops;
gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
sizeof(*gradient->stops));
if (gradient->stops == (StopInfo *) NULL)
{
/* FIX: release draw_info before throwing, it was leaked here before. */
draw_info=DestroyDrawInfo(draw_info);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
/*
Draw a gradient on the image.
*/
status=DrawGradientImage(image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release a per-thread histogram set created by AcquireHistogramThreadSet():
  free each thread's histogram slot (when allocated) and then the pointer
  table itself.  Always returns NULL so callers can overwrite their handle.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    n;

  ssize_t
    number_threads;

  assert(histogram != (size_t **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
  {
    if (histogram[n] != (size_t *) NULL)
      histogram[n]=(size_t *) RelinquishMagickMemory(histogram[n]);
  }
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}
/*
  Allocate one histogram of 'count' bins per worker thread.  The pointer
  table is zeroed first so that a partial failure can be unwound safely by
  DestroyHistogramThreadSet().  Returns NULL on any allocation failure.
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  register ssize_t
    n;

  size_t
    **histogram,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  /* Zero the table so unfilled slots are safe to pass to the destructor. */
  (void) memset(histogram,0,number_threads*sizeof(*histogram));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    histogram[n]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
    if (histogram[n] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}
/*
  Apply an oil-paint effect: each output pixel takes the channel values of
  the most frequent intensity found in a width x width neighborhood of the
  input.  Returns a new image, or NULL on failure.
*/
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);  /* FIX: result was previously
          assigned to linear_image, leaking paint_image's handle */
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  /* Offset of the neighborhood's central pixel within the padded row span. */
  center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
    (width/2L)+GetPixelChannels(linear_image)*(width/2L);
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register size_t
      *histogram;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch the padded source window and the destination row. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        n,
        v;

      /*
        Assign most frequent color.
      */
      k=0;
      j=0;
      count=0;
      (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          /* Bin the neighborhood pixel by intensity; remember the offset
             (j) of the pixel in the winning bin. */
          n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+GetPixelChannels(linear_image)*(u+k))));
          histogram[n]++;
          if (histogram[n] > count)
            {
              j=k+u;
              count=histogram[n];
            }
        }
        k+=(ssize_t) (linear_image->columns+width);
      }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(linear_image,i);
        PixelTrait traits = GetPixelChannelTraits(linear_image,channel);
        PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (paint_traits == UndefinedPixelTrait))
          continue;
        if ((paint_traits & CopyPixelTrait) != 0)
          {
            /* Copy-only channels take the central source pixel verbatim. */
            SetPixelChannel(paint_image,channel,p[center+i],q);
            continue;
          }
        SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
          i],q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(paint_image);
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(linear_image,OilPaintImageTag,progress,
          linear_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Replace, in place, every pixel that fuzzily matches 'target' with 'fill';
  when 'invert' is MagickTrue, replace the pixels that do NOT match instead.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Pixels are rewritten directly, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Adapt fill and target to this image's colorspace/channel layout. */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* Fuzzy match against the conformed target; 'invert' flips the sense. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          PixelTrait
            traits;

          /* Only channels flagged for update are overwritten. */
          traits=GetPixelChannelTraits(image,RedPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelRed(image,(Quantum) conform_fill.red,q);
          traits=GetPixelChannelTraits(image,GreenPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelGreen(image,(Quantum) conform_fill.green,q);
          traits=GetPixelChannelTraits(image,BluePixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlue(image,(Quantum) conform_fill.blue,q);
          traits=GetPixelChannelTraits(image,BlackPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlack(image,(Quantum) conform_fill.black,q);
          traits=GetPixelChannelTraits(image,AlphaPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Set the alpha of every pixel that fuzzily matches 'target' to 'opacity';
  when 'invert' is MagickTrue, affect the non-matching pixels instead.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Ensure an alpha channel exists before writing opacity values. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, TransparentPaintImage()
% is not suitable for the operations like chroma, where the tolerance for
% similarity of two color component (RGB) can be different. Thus we define
% this method to take two target pixels (one low and one high) and all the
% pixels of an image which are lying between these two pixels are made
% transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Set the alpha of every pixel whose red, green and blue all lie within the
  inclusive [low, high] per-channel range to 'opacity'; 'invert' flips the
  selection.  Unlike TransparentPaintImage(), tolerance is expressed per
  channel via the two bounding colors rather than a single fuzz value.
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Ensure an alpha channel exists before writing opacity values. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* Inclusive per-channel range test on red, green and blue. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target: len is not mapped. It should be firstprivate within target.
*/
#include <stdio.h>
#include <omp.h>
/*
  DataRaceBench kernel: two independent parallel sweeps over a[] (initialize,
  then increment) followed by a sequential dump.  Each loop iteration touches
  a distinct element, so both parallel loops are race-free.
*/
int main(int argc,char *argv[])
{
  int idx;
  int len = 1000;
  int a[len];

  /* Initialize each element with its own index, in parallel. */
#pragma omp parallel for private (idx)
  for (idx = 0; idx < len; idx++) {
    a[idx] = idx;
  }
  /* Increment every element, again in parallel. */
#pragma omp parallel for private (idx)
  for (idx = 0; idx < len; idx++) {
    a[idx] = a[idx] + 1;
  }
  /* Emit the results sequentially. */
  for (idx = 0; idx < len; idx++) {
    printf("%d",a[idx]);
  }
  return 0;
}
|
Simpson.h | #pragma once
#include <omp.h>
#include <iostream>
namespace Simpson
{
#define Eps (0.000000000001)
// Compare a sequential and a parallel result for near-equality and report
// the outcome on stdout.  Returns true when |sequent - parallel| <= Eps.
// FIX: the original used abs() on a double difference; with only <omp.h>
// and <iostream> included, abs() may resolve to the integer overload,
// truncating the difference toward zero and masking real mismatches.  The
// comparison below avoids abs() entirely.
inline bool Test(double sequent, double parallel)
{
	double diff = sequent - parallel;
	bool OK = (diff <= Eps) && (-diff <= Eps);
	std::cout << "\n Check the results ...";
	if (OK != true)
		std::cout << "Warning!!! Something went wrong." << std::endl;
	else
		std::cout << "Successfully!!!" << std::endl;
	return OK;
}
// Report which implementation finished first and print the speedup ratio
// (sequential time divided by parallel time) with fixed 3-digit precision.
inline void CheckResults(double sequentTimeWork, double parallTimeWork)
{
	std::cout << "\n Who is faster? ...";
	const bool parallel_won = parallTimeWork < sequentTimeWork;
	if (parallel_won)
		std::cout << " Parallel algorithm" << std::endl;
	else
		std::cout << " Sequential algorithm" << std::endl;
	std::cout.precision(3);
	std::cout.setf(std::ios::fixed);
	std::cout << " Speedup: " << sequentTimeWork / parallTimeWork << std::endl;
}
////////////////////////////////////////////////////////////////////////////////////////////
// One-dimensional composite Simpson rule for f(fixX, y) on [a, b] with N
// subintervals (N is expected to be even): odd interior nodes carry weight
// 4, even interior nodes weight 2, the endpoints weight 1.
double Simpson_integration(double(*f)(double, double), double a, double b, int N, double fixX)
{
	const double h = static_cast<double>(b - a) / N;
	double odd_sum = f(fixX, a + h);   // nodes 1, 3, 5, ... (weight 4)
	double even_sum = 0.;              // nodes 2, 4, 6, ... (weight 2)
	for (int k = 3; k < N; k += 2)
	{
		odd_sum += f(fixX, a + k*h);
		even_sum += f(fixX, a + (k - 1)*h);
	}
	return (h / 3) * (f(fixX, a) + 4 * odd_sum + 2 * even_sum + f(fixX, b));
}
// Two-dimensional composite Simpson rule for F over [a1,b1] x [a2,b2] with
// N = 2n subintervals per axis.  Reversed bounds are normalized and flip the
// overall sign; degenerate ranges and n <= 0 integrate to 0.
// NOTE(review): abs() on a double here may bind to an integer overload since
// only <omp.h>/<iostream> are included — confirm <cmath> is reachable.
double Simpson2(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1', which collapsed the interval instead of swapping
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double I2 = 0., I3 = 0.;
	// Outer Simpson sweep over the second variable; inner integrals over the
	// first variable are delegated to Simpson_integration().
	for (int j = 1; j < N; j += 2)
	{
		I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		if (j != 1)
			I2 += Simpson_integration(F, a1, b1, N, a2 + (j - 1)*h2);
	}
	return sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
}
// OpenMP variant of Simpson2(): the outer sweep over the second variable is
// parallelized with a reduction on the two partial sums.  Same contract as
// Simpson2().
double Simpson2_OMP_parallel(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double I2 = 0., I3 = 0.;
	int j = 0;
	// Iterations are independent; I2/I3 are combined via the reduction clause.
#pragma omp parallel for shared(a1, b1, a2, N, h2) private(j) reduction(+:I3,I2)
	for (j = 1; j < N; j += 2)
	{
		I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		if (j != 1)
			I2 += Simpson_integration(F, a1, b1, N, a2 + (j - 1)*h2);
	}
	return sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
}
// Timing harness: runs the odd/even-branch variant of the 2-D Simpson rule
// and returns the elapsed wall-clock time in milliseconds (the integral
// value itself is discarded).
double Simpson2_test0(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double t1 = 0., t2 = 0.;
	t1 = omp_get_wtime();
	double I2 = 0., I3 = 0.;
	for (int j = 1; j < N; ++j)
	{
		if (j % 2 == 1)
			I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		else
			I2 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
	}
	t2 = omp_get_wtime();
	double s = sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
	(void)s;  // computed only so the timed work is not dead code
	return (t2 - t1) * 1000.;
}
// Timing harness: runs the stride-2 variant of the 2-D Simpson rule and
// returns the elapsed wall-clock time in milliseconds.
double Simpson2_test1(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double t1 = 0., t2 = 0.;
	t1 = omp_get_wtime();
	double I2 = 0., I3 = 0.;
	for (int j = 1; j < N; j += 2)
	{
		I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		if (j != 1)
			I2 += Simpson_integration(F, a1, b1, N, a2 + (j - 1)*h2);
	}
	t2 = omp_get_wtime();
	double s = sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
	(void)s;  // computed only so the timed work is not dead code
	return (t2 - t1) * 1000.;
}
// Timing harness: runs the hoisted-first-node variant of the 2-D Simpson
// rule and returns the elapsed wall-clock time in milliseconds.
double Simpson2_test2(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double t1 = 0., t2 = 0.;
	t1 = omp_get_wtime();
	double I2 = 0., I3 = 0.;
	// First odd node is handled outside the loop so both sums advance in lockstep.
	I3 += Simpson_integration(F, a1, b1, N, a2 + h2);
	for (int j = 3; j < N; j += 2)
	{
		I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		I2 += Simpson_integration(F, a1, b1, N, a2 + (j - 1)*h2);
	}
	t2 = omp_get_wtime();
	double s = sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
	(void)s;  // computed only so the timed work is not dead code
	return (t2 - t1) * 1000.;
}
///////////////////////////////////////////////////////////////////////////////////////////
// Timing harness for the OpenMP variant: returns the elapsed wall-clock
// time in milliseconds for one parallel 2-D Simpson integration.
double Simpson2_OMP_parallel_test(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double t1 = 0., t2 = 0.;
	t1 = omp_get_wtime();
	double I2 = 0., I3 = 0.;
	int j = 0;
#pragma omp parallel for shared(a1, b1, a2, N, h2) private(j) reduction(+:I3,I2)
	for (j = 1; j < N; j += 2)
	{
		I3 += Simpson_integration(F, a1, b1, N, a2 + j*h2);
		if (j != 1)
			I2 += Simpson_integration(F, a1, b1, N, a2 + (j - 1)*h2);
	}
	t2 = omp_get_wtime();
	double s = sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * I2 + 4 * I3 + Simpson_integration(F, a1, b1, N, b2));
	(void)s;  // computed only so the timed work is not dead code
	return (t2 - t1) * 1000.;
}
// Timing harness for the TBB variant: runs the parallel reduction via
// simpson_tbb_reduce (declared elsewhere) and returns the elapsed
// wall-clock time in milliseconds.
double Simpson2_TBB_parallel_test(double(*F)(double, double), double a1, double b1, double a2, double b2, int n)
{
	if (n <= 0)
		return 0.;
	int sign = 1;
	if (abs(a1 - b1) < Eps || abs(a2 - b2) < Eps)
		return 0.;
	if (a1 > b1)
	{
		double tmp = b1;
		b1 = a1;
		a1 = tmp;  // FIX: was 'a1 = b1' — 'tmp' was computed but never used
		sign *= -1;
	}
	if (a2 > b2)
	{
		double tmp = b2;
		b2 = a2;
		a2 = tmp;  // FIX: was 'a2 = b2'
		sign *= -1;
	}
	int N = 2 * n;
	double h2 = static_cast<double>(b2 - a2) / N;
	double t1 = 0., t2 = 0.;
	t1 = omp_get_wtime();
	tbb::task_scheduler_init init(4);
	simpson_tbb_reduce stbbr(F, a1, b1, N, a2, h2);
	tbb::parallel_reduce(tbb::blocked_range<int>(1, N, 5), stbbr);
	init.terminate();
	t2 = omp_get_wtime();
	double s = sign * (h2 / 3) * (Simpson_integration(F, a1, b1, N, a2) + 2 * stbbr.getI2() + 4 * stbbr.getI3() + Simpson_integration(F, a1, b1, N, b2));
	(void)s;  // computed only so the timed work is not dead code
	return (t2 - t1) * 1000.;
}
///////////////////////////////////////////////////////////////////////////////////////////
// Test integrand: f(x, y) = x * y^2.
inline double func2(double x, double y)
{
	return y * y * x;
}
// Test integrand: f(x, y) = (x^2*y - x) / sqrt(x + x*y^2).
// Only defined where x + x*y^2 > 0.
inline double func1(double x, double y)
{
	const double numerator = x * x * y - x;
	const double denominator = sqrt(x + y * y * x);
	return numerator / denominator;
}
// Benchmark driver: averages the per-call millisecond timings returned by
// the three serial Simpson variants over a user-chosen number of
// experiments, then prints the averages and the total wall time.
// Timing-call order is the measured behavior — do not reorder.
void Test_serial_code_simpson()
{
	int num_of_experiments = 10;
	std::cout << "\n *********** Hello from a function: run sub_main() ****************\n";
	std::cout << " please, enter the number of experiments.. ";
	std::cin >> num_of_experiments;
	int n = 1000;
	double t1 = 0., t2 = 0.;
	// Each "integrationK" accumulates elapsed milliseconds, not integral values.
	double integration0 = 0., integration1 = 0., integration2 = 0.;
	t1 = omp_get_wtime();
	for (int i = 0; i < num_of_experiments; ++i)
	{
		integration0 += Simpson2_test0(&func2, 1, 2, 2, 3, n);
		integration0 += Simpson2_test0(&func1, 1, 2, 2, 3, n);
		integration1 += Simpson2_test1(&func2, 1, 2, 2, 3, n);
		integration1 += Simpson2_test1(&func1, 1, 2, 2, 3, n);
		integration2 += Simpson2_test2(&func2, 1, 2, 2, 3, n);
		integration2 += Simpson2_test2(&func1, 1, 2, 2, 3, n);
	}
	t2 = omp_get_wtime();
	// Two integrands per experiment, hence the factor of 2 in the average.
	integration0 /= 2*num_of_experiments;
	integration1 /= 2*num_of_experiments;
	integration2 /= 2*num_of_experiments;
	std::cout.precision(15);
	std::cout << "\n average time: ";
	std::cout << "\nSimpson0 integrate = " << integration0 << "ms";
	std::cout << "\nSimpson1 integrate = " << integration1 << "ms";
	std::cout << "\nSimpson2 integrate = " << integration2 << "ms";
	std::cout << "\n total time work = " << (t2 - t1) * 1000. << "ms";
	std::cout.precision(6);
	std::cout << "\n ***************************\n";
	return;
}
// Benchmark driver: compares the three serial Simpson variants against the
// OpenMP and TBB parallel variants, printing average per-call times and
// serial/parallel speedup ratios.  Uses both an OpenMP wall clock and a TBB
// tick counter for the overall elapsed time.
void Test_serial_vs_parallel()
{
	int num_of_experiments = 10;
	std::cout << "\n *********** Hello from a function: Test_serial_vs_parallel() ****************\n";
	std::cout << " please, enter the number of experiments.. ";
	std::cin >> num_of_experiments;
	int n = 1000;
	double t1 = 0., t2 = 0.;
	// Accumulated elapsed milliseconds (not integral values).
	double integration0 = 0., integration1 = 0., integration2 = 0.;
	double par_integration0 = 0., par_integration1 = 0.;
	tbb::tick_count ti1, ti2;
	t1 = omp_get_wtime();
	ti1 = tbb::tick_count::now();
	for (int i = 0; i < num_of_experiments; ++i)
	{
		integration0 += Simpson2_test0(&func2, 1, 2, 2, 3, n);
		integration0 += Simpson2_test0(&func1, 1, 2, 2, 3, n);
		integration1 += Simpson2_test1(&func2, 1, 2, 2, 3, n);
		integration1 += Simpson2_test1(&func1, 1, 2, 2, 3, n);
		integration2 += Simpson2_test2(&func2, 1, 2, 2, 3, n);
		integration2 += Simpson2_test2(&func1, 1, 2, 2, 3, n);
		par_integration0 += Simpson2_OMP_parallel_test(&func2, 1, 2, 2, 3, n);
		par_integration0 += Simpson2_OMP_parallel_test(&func1, 1, 2, 2, 3, n);
		par_integration1 += Simpson2_TBB_parallel_test(&func2, 1, 2, 2, 3, n);
		par_integration1 += Simpson2_TBB_parallel_test(&func1, 1, 2, 2, 3, n);
	}
	t2 = omp_get_wtime();
	ti2 = tbb::tick_count::now();
	// Two integrands per experiment, hence the factor of 2 in each average.
	integration0 /= 2 * num_of_experiments;
	integration1 /= 2 * num_of_experiments;
	integration2 /= 2 * num_of_experiments;
	par_integration0 /= 2 * num_of_experiments;
	par_integration1 /= 2 * num_of_experiments;
	std::cout.precision(15);
	std::cout << "\n average time: ";
	std::cout << "\nSimpson0 integrate = " << integration0 << "ms";
	std::cout << "\nSimpson1 integrate = " << integration1 << "ms";
	std::cout << "\nSimpson2 integrate = " << integration2 << "ms";
	std::cout << "\n";
	std::cout << "\nSimpson_OMP integrate = " << par_integration0 << "ms";
	std::cout << "\nSimpson_TBB integrate = " << par_integration1 << "ms";
	std::cout << "\n";
	std::cout << "\nSpeedup Simpson0 / OMP = " << integration0 / par_integration0;
	std::cout << "\nSpeedup Simpson1 / OMP = " << integration1 / par_integration0;
	std::cout << "\nSpeedup Simpson2 / OMP = " << integration2 / par_integration0;
	std::cout << "\n";
	std::cout << "\nSpeedup Simpson0 / TBB = " << integration0 / par_integration1;
	std::cout << "\nSpeedup Simpson1 / TBB = " << integration1 / par_integration1;
	std::cout << "\nSpeedup Simpson2 / TBB = " << integration2 / par_integration1;
	std::cout << "\n Omptimer total time work = " << (t2 - t1) * 1000. << "ms";
	std::cout << "\n TBBtimer total time work = " << (ti2 - ti1).seconds() * 1000. << "ms";
	std::cout.precision(6);
	std::cout << "\n ***************************\n";
	return;
}
// Benchmark driver: times Simpson2, the OpenMP variant, and a TBB variant
// externally (wrapping each call in omp_get_wtime()) rather than relying on
// the functions' own timing, then prints averages and speedups.
// NOTE(review): calls Simpson2_TBB_parallel, which is not defined in this
// header — presumably declared elsewhere in the project; verify it exists.
void Test_ser_vs_par()
{
	int num_of_experiments = 10;
	std::cout << "\n *********** Hello from a function: Test_ser_vs_par() ****************\n";
	std::cout << " please, enter the number of experiments.. ";
	std::cin >> num_of_experiments;
	int n = 1000;
	double t1 = 0., t2 = 0.;
	double tt1 = 0., tt2 = 0.;
	// Accumulated elapsed milliseconds (not integral values).
	double integration1 = 0.;
	double par_integration0 = 0., par_integration1 = 0.;
	tbb::tick_count ti1, ti2;
	t1 = omp_get_wtime();
	ti1 = tbb::tick_count::now();
	for (int i = 0; i < num_of_experiments; ++i)
	{
		tt1 = omp_get_wtime();
		Simpson2(&func2, 1, 2, 2, 3, n);
		tt2 = omp_get_wtime();
		integration1 += (tt2 - tt1)* 1000.;
		tt1 = omp_get_wtime();
		Simpson2_OMP_parallel(&func2, 1, 2, 2, 3, n);
		tt2 = omp_get_wtime();
		par_integration0 += (tt2 - tt1)* 1000.;
		tt1 = omp_get_wtime();
		Simpson2_TBB_parallel(&func2, 1, 2, 2, 3, n);
		tt2 = omp_get_wtime();
		par_integration1 += (tt2 - tt1)* 1000.;
	}
	t2 = omp_get_wtime();
	ti2 = tbb::tick_count::now();
	integration1 /= 1 * num_of_experiments;
	par_integration0 /= 1 * num_of_experiments;
	par_integration1 /= 1 * num_of_experiments;
	std::cout.precision(15);
	std::cout << "\n average time: ";
	std::cout << "\nSimpson0 integrate = " << integration1 << "ms";
	std::cout << "\n";
	std::cout << "\nSimpson_OMP integrate = " << par_integration0 << "ms";
	std::cout << "\nSimpson_TBB integrate = " << par_integration1 << "ms";
	std::cout << "\n";
	std::cout << "\nSpeedup Simpson0 / OMP = " << integration1 / par_integration0;
	std::cout << "\nSpeedup Simpson0 / TBB = " << integration1 / par_integration1;
	std::cout << "\n Omptimer total time work = " << (t2 - t1) * 1000. << "ms";
	std::cout << "\n TBBtimer total time work = " << (ti2 - ti1).seconds() * 1000. << "ms";
	std::cout.precision(6);
	std::cout << "\n ***************************\n";
	return;
}
}
|
bugged6.c | /******************************************************************************
* ЗАДАНИЕ: bugged6.c
* ОПИСАНИЕ:
* Множественные ошибки компиляции
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN];
// Compute this thread's share of the dot product a.b.
// Meant to be called from inside an "omp parallel" region: the orphaned
// "omp for" below splits the VECLEN iterations across the threads of the
// enclosing region, and the caller combines the per-thread partial sums
// with a reduction clause.
float dotprod()
{
    int i, tid;
    float sum = 0.0f;  // BUG FIX: sum was read uninitialized in the loop (undefined behavior)
    tid = omp_get_thread_num();
    // The reduction over threads is performed by the enclosing omp parallel in main()
    //#pragma omp for reduction(+:sum)
    #pragma omp for
    for (i = 0; i < VECLEN; i++)
    {
        sum = sum + (a[i] * b[i]);
        printf(" tid= %d i=%d\n", tid, i);
    }
    // Return this thread's partial sum to the caller
    return sum;
}
// Fill a and b with 0..VECLEN-1, then compute their dot product in parallel:
// each thread calls dotprod() (whose internal "omp for" splits the work) and
// the reduction clause adds the per-thread partial sums together.
int main (int argc, char *argv[])
{
    int i;
    float sum;
    for (i = 0; i < VECLEN; i++)
        a[i] = b[i] = 1.0 * i;
    sum = 0.0;
    // Removed shared(sum) and added a reduction so the results of the
    // dotprod() calls in the different threads are summed together
    //#pragma omp parallel shared(sum)
    #pragma omp parallel reduction(+:sum)
    sum = dotprod();
    printf("Sum = %f\n",sum);
}
|
dufu17r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Boomerang cryptanalysis of SKINNY
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
typedef unsigned long long uint64_t;
// #define DEBUG 1
#define Nthreads 4 // Number of parallel threads utilized in this program
#define NumOfExperiments 100 // Number of independent experiments
#define STEP ((1 << 10) - 1)
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
// Seed the C PRNG from wall-clock time mixed with a caller-supplied offset,
// so that concurrent threads (passing their thread id) get distinct streams.
// Should be called once per thread before rand() is used.
void init_prng(int offset) {
    unsigned int seed = 10 * (unsigned int)time(NULL) + 11 * (unsigned int)offset;
    srand(seed);
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
// Print a 4x4 cell matrix to the global log file `fic` as a hex string.
// For 64-bit versions each cell is a nibble, so pairs of cells are packed
// into one byte; for 128-bit versions each cell is a full byte.
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        // pack two 4-bit cells per output byte (row-major order)
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        // one byte per cell
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}
// Log the internal state followed by each active tweakey word (TK1..TKz,
// where z = key size / block size) to the global file `fic`.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// Extract and apply the subtweakey to the internal state (XOR of the two top
// rows of the active TK words), then advance the tweakey schedule one round:
// permute every TK word with TWEAKEY_P and clock the LFSRs on the top two
// rows of TK2 and TK3.
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (rows 0-1 of TK2/TK3 only)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    // TK2 LFSR: left shift with feedback taps (4-bit or 8-bit variant)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    // TK3 LFSR: right shift with feedback taps (inverse direction of TK2)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }
    // commit the updated tweakey words back into keyCells
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}
// Inverse of AddKey: step the tweakey schedule backwards one round (inverse
// permutation, then inverse LFSRs — note they now act on rows 2-3, which the
// inverse permutation moved the clocked rows into), then XOR the resulting
// subtweakey into the top two rows of the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (rows 2-3 after inverse permutation)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    // inverse of the TK2 LFSR (right shift with taps)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    // inverse of the TK3 LFSR (left shift with taps)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }
    // commit the rewound tweakey words back into keyCells
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
    // apply the subtweakey to the internal state (XOR is its own inverse)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
// XOR the round constant into the state: the low nibble of RC[r] into cell
// (0,0), the top two bits into cell (1,0), and the fixed constant 0x2 into
// cell (2,0).  XOR is an involution, so this also serves as its own inverse.
void AddConstants(unsigned char state[4][4], int r)
{
    state[0][0] ^= (RC[r] & 0xf);
    state[1][0] ^= ((RC[r] >> 4) & 0x3);
    state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
// Substitute every cell of the state through the 4-bit Sbox.
void SubCell4(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++)
    {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_4[cells[col]];
    }
}
// apply the 4-bit inverse Sbox
// Substitute every cell of the state through the inverse 4-bit Sbox.
void SubCell4_inv(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++)
    {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_4_inv[cells[col]];
    }
}
// apply the 8-bit Sbox
// Substitute every cell of the state through the 8-bit Sbox.
void SubCell8(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++)
    {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_8[cells[col]];
    }
}
// apply the 8-bit inverse Sbox
// Substitute every cell of the state through the inverse 8-bit Sbox.
void SubCell8_inv(unsigned char state[4][4])
{
    int row, col;
    for (row = 0; row < 4; row++)
    {
        unsigned char *cells = state[row];
        for (col = 0; col < 4; col++)
            cells[col] = sbox_8_inv[cells[col]];
    }
}
// Apply the ShiftRows function
// Apply the ShiftAndSwitchRows permutation P to the state: every destination
// cell is gathered from the source position P gives for its flat index.
void ShiftRows(unsigned char state[4][4])
{
    unsigned char permuted[4][4];
    int cell;
    for (cell = 0; cell < 16; cell++)
    {
        int src = P[cell];
        permuted[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, permuted, sizeof(permuted));
}
// Apply the inverse ShiftRows function
// Apply the inverse ShiftAndSwitchRows permutation P_inv, undoing ShiftRows.
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char permuted[4][4];
    int cell;
    for (cell = 0; cell < 16; cell++)
    {
        int src = P_inv[cell];
        permuted[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, permuted, sizeof(permuted));
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
// Multiply each column of the state by the binary diffusion matrix M
// (rows: 1011 / 1000 / 0110 / 1010), written out as explicit XOR sums
// instead of the in-place XOR-and-rotate sequence.
void MixColumn(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        unsigned char r0 = state[0][col];
        unsigned char r1 = state[1][col];
        unsigned char r2 = state[2][col];
        unsigned char r3 = state[3][col];
        state[0][col] = r0 ^ r2 ^ r3;
        state[1][col] = r0;
        state[2][col] = r1 ^ r2;
        state[3][col] = r0 ^ r2;
    }
}
// Apply the inverse linear diffusion matrix
// Multiply each column by the inverse of the diffusion matrix, undoing
// MixColumn, written as explicit XOR sums per output row.
void MixColumn_inv(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        unsigned char r0 = state[0][col];
        unsigned char r1 = state[1][col];
        unsigned char r2 = state[2][col];
        unsigned char r3 = state[3][col];
        state[0][col] = r1;
        state[1][col] = r1 ^ r2 ^ r3;
        state[2][col] = r1 ^ r3;
        state[3][col] = r0 ^ r3;
    }
}
// decryption function of Skinny
// Decrypt `input` in place with `userkey` over `r` rounds of Skinny version
// `ver`.  The tweakey schedule is first fast-forwarded r rounds on a dummy
// state (AddKey on `dummy`) so that keyCells holds the last-round subtweakeys,
// then the rounds are undone in reverse order with the inverse primitives.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};  // throwaway state used only to clock the key schedule forward
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack the ciphertext and the tweakey bytes into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            // 64-bit block: two 4-bit cells per input byte (odd i = low nibble)
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            // 128-bit block: one byte per cell
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
    // fast-forward the tweakey schedule to the final round
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }
#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // undo the rounds in reverse order, inverting each step of enc()
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);  // XOR round constants (self-inverse)
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // repack the recovered plaintext cells into the input byte array
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// encryption function of Skinny
// Encrypt `input` in place with `userkey` over `r` rounds of Skinny version
// `ver`.  Each round is SubCells -> AddConstants -> AddKey (which also steps
// the tweakey schedule) -> ShiftRows -> MixColumn.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack the plaintext and the tweakey bytes into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            // 64-bit block: two 4-bit cells per input byte (odd i = low nibble)
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            // 128-bit block: one byte per cell
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added
#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // repack the ciphertext cells into the input byte array
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// generate test vectors for all the versions of Skinny
// Generate 9 random test vectors for Skinny version `ver`: log key, plaintext,
// 10-round ciphertext, and the decryption of that ciphertext (P') to the
// global file `fic`, so encryption/decryption consistency can be eyeballed.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        // random plaintext, kept in p; c is the working copy that gets encrypted in place
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        // random tweakey of the version's key length
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        // P' should equal P if enc/dec are consistent
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}
// Send N3 boomerang quartets through r rounds of Skinny version `ver` and
// return how many come back: encrypt a random p1 and p2 = p1 ^ dp under
// related keys k1/k2, shift both ciphertexts by dc, decrypt under k3/k4,
// and count the quartets whose plaintext difference equals dp again.
int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *k1, unsigned char *k2, unsigned char *k3, unsigned char *k4)
{
    int i;
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    int num = 0;  // number of returning boomerangs
    for (uint64_t t = 0; t < N3; t++)
    {
        // randomly choose p1
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = rand() & 0xff;
        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        // p1/p2 are encrypted in place and now hold the ciphertexts
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // the boomerang returns if the decrypted pair differs by dp again
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}
// Run one experiment: N1 parallel threads each send N2 bunches of N3
// boomerang quartets for R rounds of Skinny version `ver`, using the four
// related keys derived from a random k1 and the key differences dk1/dk2.
// Prints timing and the empirical probability, and returns the total number
// of returning boomerangs.
double send_boomerangs(int R, int ver, int N1, uint64_t N2, uint64_t N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];  // per-iteration boomerang counts, one slot per loop index
    int counter;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
    printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, log(N2 * N3) / log(2));
    unsigned char k1[48], k2[48], k3[48], k4[48];
    srand((unsigned)time(NULL));
    // randomly choose k1
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
    #pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);  // distinct PRNG stream per thread
        for (uint64_t j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, k1, k2, k3, k4);
            if ((j & STEP) == 0){
                printf("PID: %d \t Bunch Number: %llu/%llu\n", ID, j, N2);
            }
        }
        // BUG FIX: index by the loop iteration, not the thread id.  If the
        // runtime maps several iterations onto one thread (or provides fewer
        // than N1 threads), indexing by ID left some NUM slots uninitialized
        // and overwrote others, corrupting the final sum.
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // empirical probability = successes / total queries, reported as 2^(-x)
    sum_temp = (double)(N1 * N2 * N3) / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string into the state byte array dx, two hex digits per byte,
// taking as many bytes as the block size of version `ver` requires.
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // BUG FIX: strtol requires a NUL-terminated string; the original
        // 2-byte buffer had no terminator, so strtol read past the end of
        // the array (undefined behavior).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string into the tweakey byte array dt, two hex digits per byte,
// taking as many bytes as the key size of version `ver` requires.
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // BUG FIX: strtol requires a NUL-terminated string; the original
        // 2-byte buffer had no terminator, so strtol read past the end of
        // the array (undefined behavior).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: configure the boomerang distinguisher (number of rounds, Skinny
// version, input/output/key differences), run NumOfExperiments independent
// experiments, and report each empirical probability plus the overall average
// as 2^(-x).
int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
    // init_prng(1);
    // //test all versions of Skinny
    // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
    // {
    //     sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
    //     fic = fopen(name, "w");
    //     fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
    //     TestVectors(i);
    //     fclose(fic);
    //     printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
    // }
    unsigned char dp[16];   // plaintext difference
    unsigned char dc[16];   // ciphertext difference
    unsigned char dk1[48];  // key difference between (k1,k2) pairs
    unsigned char dk2[48];  // key difference between (k1,k3) / (k2,k4) pairs
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int R = 17;   // Number of rounds
    int ver = 1;  // Determine the version:
    //  [0 = Skinny-64-64]
    //  [1 = Skinny-64-128]
    //  [2 = Skinny-64-192]
    //  [3 = Skinny-128-128]
    //  [4 = Skinny-128-256]
    //  [5 = Skinny-128-384]
    char dp_str[] = "0000000000000800";
    char dc_str[] = "0200000002000200";
    char dk1_str[] = "000000000C000000000000000F000000";
    char dk2_str[] = "00000000000000400000000000000070";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 14;
    int deg2 = 14;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg1)
    int N3 = 1 << deg2; // Number of queries per bunche : N3 = 2^(deg2)
    //################### Number of total queries : N1*N2*N3 ###############
    // NOTE(review): each result string is formatted into a 20-char slot;
    // confirm "2^(-%0.2f), " cannot exceed it for extreme probabilities.
    char all_results[NumOfExperiments][20];
    double sum = 0;       // total returning boomerangs over all experiments
    double sum_temp = 0;
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("Experiment Number %d:\n", i);
        sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
        sum += sum_temp;
        sum_temp = (double)(N1 * N2 * N3) / sum_temp;  // per-experiment inverse probability
        sprintf(all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
    }
    printf("A summary of all results:\n");
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("%s", all_results[i]);
    }
    // average probability = sum / (NumOfExperiments * N1 * N2 * N3), reported as 2^(-x)
    printf("\n##########################\nAverage = 2^(-%0.4f)\n",
           (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
}
|
activations.c | #include "activations.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Return the canonical config-file name for activation `a`.
// Any value without an explicit case below (including RELU itself)
// reports as "relu".
char *get_activation_string(ACTIVATION a)
{
    switch (a) {
        case LOGISTIC: return "logistic";
        case LOGGY:    return "loggy";
        case RELU:     return "relu";
        case ELU:      return "elu";
        case SELU:     return "selu";
        case RELIE:    return "relie";
        case RAMP:     return "ramp";
        case LINEAR:   return "linear";
        case TANH:     return "tanh";
        case PLSE:     return "plse";
        case LEAKY:    return "leaky";
        case STAIR:    return "stair";
        case HARDTAN:  return "hardtan";
        case LHTAN:    return "lhtan";
        default:       return "relu";
    }
}
// Map an activation name from a config file onto its enum value.
// Unknown names warn on stderr and fall back to RELU.
ACTIVATION get_activation(char *s)
{
    static const struct { const char *name; ACTIVATION act; } table[] = {
        {"logistic", LOGISTIC},
        {"swish", SWISH},
        {"mish", MISH},
        {"normalize_channels", NORM_CHAN},
        {"normalize_channels_softmax", NORM_CHAN_SOFTMAX},
        {"loggy", LOGGY},
        {"relu", RELU},
        {"elu", ELU},
        {"selu", SELU},
        {"relie", RELIE},
        {"plse", PLSE},
        {"hardtan", HARDTAN},
        {"lhtan", LHTAN},
        {"linear", LINEAR},
        {"ramp", RAMP},
        {"leaky", LEAKY},
        {"tanh", TANH},
        {"stair", STAIR},
    };
    size_t idx;
    for (idx = 0; idx < sizeof(table) / sizeof(table[0]); ++idx) {
        if (strcmp(s, table[idx].name) == 0) return table[idx].act;
    }
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
// Apply activation `a` to the scalar x via the per-activation inline helpers
// declared in activations.h.  Activations without a case here (SWISH, MISH,
// NORM_CHAN, NORM_CHAN_SOFTMAX) fall through and return 0 -- they are handled
// by the dedicated array functions below, not per-scalar.
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case LOGGY:
            return loggy_activate(x);
        case RELU:
            return relu_activate(x);
        case ELU:
            return elu_activate(x);
        case SELU:
            return selu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case LEAKY:
            return leaky_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
        case STAIR:
            return stair_activate(x);
        case HARDTAN:
            return hardtan_activate(x);
        case LHTAN:
            return lhtan_activate(x);
    }
    return 0;
}
// Apply activation `a` to every element of x in place.  LINEAR is a no-op;
// the two hottest activations (LEAKY, LOGISTIC) get dedicated OpenMP loops
// that avoid the per-element switch in activate(); everything else goes
// through the generic (serial) dispatch.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    if (a == LINEAR) {}
    else if (a == LEAKY) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = leaky_activate(x[i]);
        }
    }
    else if (a == LOGISTIC) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = logistic_activate(x[i]);
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}
// Swish activation: output = x * sigmoid(x), elementwise over n values.
// The sigmoid is also stored in output_sigmoid for reuse in the backward
// pass (see gradient_array_swish).
void activate_array_swish(float *x, const int n, float * output_sigmoid, float * output)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        float sigmoid = logistic_activate(x_val);
        output_sigmoid[i] = sigmoid;
        output[i] = x_val * sigmoid;
    }
}
// https://github.com/digantamisra98/Mish
// Mish activation: output = x * tanh(softplus(x)), elementwise over n values,
// with softplus clamped at MISH_THRESHOLD for numerical stability.
// The pre-activation input is saved in activation_input for the backward pass
// (see gradient_array_mish).  Reference: https://github.com/digantamisra98/Mish
void activate_array_mish(float *x, const int n, float * activation_input, float * output)
{
    const float MISH_THRESHOLD = 20;
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        activation_input[i] = x_val; // store value before activation
        output[i] = x_val * tanh_activate( softplus_activate(x_val, MISH_THRESHOLD) );
    }
}
// Normalize across channels with a ReLU gate: for each spatial position,
// negative inputs map to 0 and positive inputs are divided by the sum of the
// positive channel values at that position (plus a small epsilon to avoid
// division by zero).  Input layout is x[wh + channel*wh_step + sample*wh_step*channels].
void activate_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *output)
{
    const float eps = 0.0001;
    int positions = n / channels;  // spatial positions across all samples
    int p;
    #pragma omp parallel for
    for (p = 0; p < positions; ++p) {
        int wh = p % wh_step;
        int sample = p / wh_step;
        int base = wh + sample * wh_step * channels;
        int ch;
        // first pass: sum of positive channel values at this position
        float total = eps;
        for (ch = 0; ch < channels; ++ch) {
            float v = x[base + ch * wh_step];
            if (v > 0) total += v;
        }
        // second pass: ReLU then normalize by the positive sum
        for (ch = 0; ch < channels; ++ch) {
            float v = x[base + ch * wh_step];
            output[base + ch * wh_step] = (v > 0) ? (v / total) : 0;
        }
    }
}
void activate_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *output)
{
int size = n / channels;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
int k;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val) / sum;
output[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
}
// Backward pass paired with activate_array_normalize_channels_softmax:
// x holds the softmax OUTPUT; for each spatial position the dot product
// grad = sum_k out_k * delta_k is computed and every delta at that position
// is scaled by it, in place.
// NOTE(review): the full softmax Jacobian gives delta_k = out_k*(delta_k - grad);
// this code applies only the grad scaling — confirm this simplification is
// intentional upstream.
void gradient_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;
    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;
        //const float eps = 0.0001;
        if (i < size) {
            // dot product of outputs and incoming gradients over channels
            float grad = 0;// eps;
            int k;
            for (k = 0; k < channels; ++k) {
                float out = x[wh_i + k * wh_step + b*wh_step*channels];
                float d = delta[wh_i + k * wh_step + b*wh_step*channels];
                grad += out*d;
            }
            // scale every channel's delta by the shared dot product
            for (k = 0; k < channels; ++k) {
                float d = delta[wh_i + k * wh_step + b*wh_step*channels];
                d = d * grad;
                delta[wh_i + k * wh_step + b*wh_step*channels] = d;
            }
        }
    }
}
// Derivative of activation `a` evaluated via the per-activation inline
// helpers from activations.h.  NORM_CHAN reuses the ReLU derivative;
// NORM_CHAN_SOFTMAX must use its dedicated array routine and aborts here.
// Activations without a case (SWISH, MISH) fall through and return 0 --
// they have dedicated gradient_array_* functions.
float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient(x);
        case LOGISTIC:
            return logistic_gradient(x);
        case LOGGY:
            return loggy_gradient(x);
        case RELU:
            return relu_gradient(x);
        case NORM_CHAN:
            return relu_gradient(x);
        case NORM_CHAN_SOFTMAX:
            printf(" Error: should be used custom NORM_CHAN_SOFTMAX-function for gradient \n");
            exit(0);
            return 0;
        case ELU:
            return elu_gradient(x);
        case SELU:
            return selu_gradient(x);
        case RELIE:
            return relie_gradient(x);
        case RAMP:
            return ramp_gradient(x);
        case LEAKY:
            return leaky_gradient(x);
        case TANH:
            return tanh_gradient(x);
        case PLSE:
            return plse_gradient(x);
        case STAIR:
            return stair_gradient(x);
        case HARDTAN:
            return hardtan_gradient(x);
        case LHTAN:
            return lhtan_gradient(x);
    }
    return 0;
}
// Chain rule over an array: multiply each incoming gradient delta[i] by the
// activation derivative at x[i], in place.
void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < n; ++i){
        delta[i] *= gradient(x[i], a);
    }
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cpp#L54-L56
// Backward pass for swish(x) = x*sigmoid(x) using the cached forward values:
// x[i] holds the swish output and sigmoid[i] the sigmoid of the input, so
// dy/dx = swish + sigmoid*(1 - swish).
void gradient_array_swish(const float *x, const int n, const float * sigmoid, float * delta)
{
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < n; ++idx) {
        const float sw = x[idx];   // cached swish activation
        delta[idx] *= sw + sigmoid[idx] * (1 - sw);
    }
}
// https://github.com/digantamisra98/Mish
// Backward pass for Mish: y = x * tanh(softplus(x)).
// delta[i] is multiplied in place by dy/dx, reconstructed from the cached
// pre-activation values in activation_input.
void gradient_array_mish(const int n, const float * activation_input, float * delta)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        const float MISH_THRESHOLD = 20.0f;
        // implementation from TensorFlow: https://github.com/tensorflow/addons/commit/093cdfa85d334cbe19a37624c33198f3140109ed
        // implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
        float inp = activation_input[i];
        const float sp = softplus_activate(inp, MISH_THRESHOLD);
        // Use the float math functions (expf/tanhf): the operands are float and
        // the rest of this file already uses expf; the previous exp()/tanh()
        // forced float->double->float round trips for no precision benefit.
        const float grad_sp = 1 - expf(-sp);
        const float tsp = tanhf(sp);
        const float grad_tsp = (1 - tsp*tsp) * grad_sp;  // d tanh(sp)/dx
        const float grad = inp * grad_tsp + tsp;         // product rule
        delta[i] *= grad;
    }
}
|
dem_fem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: msantasusana, croig $
// Date: $Date: 2015-10-26 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_DEM_FEM_SEARCH_H_INCLUDED )
#define KRATOS_DEM_FEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "utilities/openmp_utils.h"
// Configures
#include "rigid_face_geometrical_object_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
// External includes
//#define CUSTOMTIMER
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
/// DEM-FEM spatial search: builds dynamic bins over the FEM conditions that
/// fall inside the (radius-amplified) bounding box of the DEM particles and
/// searches the rigid faces around every spherical particle.
class KRATOS_API(DEM_APPLICATION) DEM_FEM_Search : public SpatialSearch
{
public:
    ///@name Type Definitions
    ///@{
    /// Pointer definition of OMP_DEMSearch
    KRATOS_CLASS_POINTER_DEFINITION(DEM_FEM_Search);
    typedef PointType* PtrPointType;
    typedef std::vector<PtrPointType>* PointVector;
    typedef std::vector<PtrPointType>::iterator PointIterator;
    typedef double* DistanceVector;
    typedef double* DistanceIterator;
    //Configure Types
    typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
    //Bin Types
    typedef BinsObjectDynamic<RigidFaceGeometricalConfigureType> GeometricalBinsType;
    typedef typename RigidFaceGeometricalConfigureType::ElementsContainerType GeometricalObjectType;
    ///@}
    ///@name Life Cycle
    ///@{
    /// Default constructor: no bins exist until the first search builds them.
    DEM_FEM_Search(){
        mBins = NULL;
    }
    /// Destructor. Releases the bins allocated by the last search.
    /// (Previously mBins was never freed here, leaking the final bins object.)
    ~DEM_FEM_Search(){
        delete mBins;
    }
    /// For every DEM element, find the FEM conditions (rigid faces) within its
    /// search radius. Fills rResults / rResultsDistance indexed like rElements.
    void SearchRigidFaceForDEMInRadiusExclusiveImplementation (
        ElementsContainerType const& rElements,
        ConditionsContainerType const& rConditions,
        VectorResultConditionsContainerType& rResults,
        VectorDistanceType& rResultsDistance)
    {
        KRATOS_TRY
        /*
        STEPS:
        ---------------------------------------------
        1. INITIALIZE
        2. CALCULATE THE DEM BBX
        3. GATHER DEM BOUNDING BOX
        4. FIND THE FE INSIDE DEM_BB TO BUILD THE BINS AND CONSTRUCT THE GLOBAL BBX
        5. GATHER FEM ELEMENTS AND THE GLOBAL BBX
        6. BUILD THE BINS
        7. PERFORM THE SEARCH FOR THE SPHERES INSIDE THE BBX (amplified by the radius of each particle)
        */
        //1. INITIALIZE
        int MaxNumberOfElements = rConditions.size();
        ElementsContainerType::ContainerType& elements_sear   = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
        ConditionsContainerType::ContainerType& conditions_bins = const_cast<ConditionsContainerType::ContainerType&>(rConditions.GetContainer());
        GeometricalObjectType::ContainerType BinsConditionPointerToGeometricalObjecPointerTemporalVector;
        RadiusArrayType Radius_out;
        int num_of_threads = ParallelUtilities::GetNumThreads();
        std::vector<unsigned int> total_fem_partition_index;
        OpenMPUtils::CreatePartition(num_of_threads, conditions_bins.size(), total_fem_partition_index);
        // Per-thread scratch so the parallel reductions below are race-free.
        std::vector<GeometricalObjectType::ContainerType> Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector(num_of_threads);
        std::vector<array_1d<double, 3> > Vector_DEM_BB_LowPoint(num_of_threads); std::vector <array_1d<double, 3 > > Vector_DEM_BB_HighPoint(num_of_threads);
        std::vector<array_1d<double, 3> > Vector_GLOBAL_BB_LowPoint(num_of_threads); std::vector <array_1d<double, 3 > > Vector_GLOBAL_BB_HighPoint(num_of_threads);
        std::vector<double> Vector_Ref_Radius(num_of_threads);
        std::vector<RadiusArrayType> Vector_Radius_out(num_of_threads);
        double Global_Ref_Radius = 0.0;
        double inf = std::numeric_limits<double>::infinity();
        for (std::size_t i = 0; i < 3; i++) {
            DEM_BB_LowPoint[i] = inf;
            DEM_BB_HighPoint[i] = -inf;
            mGlobal_BB_LowPoint[i] = inf;
            mGlobal_BB_HighPoint[i] = -inf;
        }
        typedef ElementsContainerType::ContainerType::iterator Elem_iter;
        typedef ConditionsContainerType::ContainerType::iterator Cond_iter;
        //2. CALCULATE THE DEM BBX
        #pragma omp parallel
        {
            double radius = 0.0;
            int k = OpenMPUtils::ThisThread();
            for(std::size_t i = 0; i < 3; i++) {
                Vector_DEM_BB_LowPoint[k][i] = inf;
                Vector_DEM_BB_HighPoint[k][i] = -inf;
            }
            #pragma omp for
            for (int p = 0; p <(int) elements_sear.size(); p++) {
                Elem_iter it = elements_sear.begin() + p;
                GeometryType& pGeometry = (*it)->GetGeometry();
                const array_1d<double, 3 >& aux_coor = pGeometry[0].Coordinates();
                SphericParticle* p_particle = dynamic_cast<SphericParticle*>((*it).get());
                radius = p_particle->GetSearchRadius();
                Vector_Ref_Radius[k] = (Vector_Ref_Radius[k] < radius) ? radius : Vector_Ref_Radius[k] ;
                for(std::size_t i = 0; i < 3; i++) {
                    Vector_DEM_BB_LowPoint[k][i]  = (Vector_DEM_BB_LowPoint[k][i] > aux_coor[i]) ? aux_coor[i] : Vector_DEM_BB_LowPoint[k][i];
                    Vector_DEM_BB_HighPoint[k][i] = (Vector_DEM_BB_HighPoint[k][i] < aux_coor[i]) ? aux_coor[i] : Vector_DEM_BB_HighPoint[k][i];
                }
            } //pragma
        }//pragma parallel
        //3. GATHER DEM BOUNDING BOX
        for(int k = 0; k < num_of_threads; k++) {
            for(std::size_t i = 0; i < 3; i++) {
                DEM_BB_LowPoint[i]  = (DEM_BB_LowPoint[i] > Vector_DEM_BB_LowPoint[k][i]) ? Vector_DEM_BB_LowPoint[k][i] : DEM_BB_LowPoint[i];
                DEM_BB_HighPoint[i] = (DEM_BB_HighPoint[i] < Vector_DEM_BB_HighPoint[k][i]) ? Vector_DEM_BB_HighPoint[k][i] : DEM_BB_HighPoint[i];
            }
            Global_Ref_Radius = (Global_Ref_Radius < Vector_Ref_Radius[k]) ? Vector_Ref_Radius[k] : Global_Ref_Radius;
        }
        // Amplify the DEM box by the largest search radius so faces touching
        // any particle's search sphere are retained.
        for(std::size_t i = 0; i < 3; i++) {
            DEM_BB_LowPoint[i]  -= 1.00f * Global_Ref_Radius;
            DEM_BB_HighPoint[i] += 1.00f * Global_Ref_Radius;
        }
        //4. FIND THE FE INSIDE DEM_BB TO BUILD THE BINS AND CONSTRUCT THE GLOBAL BBX
        #pragma omp parallel
        {
            int k = OpenMPUtils::ThisThread();
            Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].reserve(total_fem_partition_index[k+1]);
            for(std::size_t i = 0; i < 3; i++) {
                Vector_GLOBAL_BB_LowPoint[k][i] = inf;
                Vector_GLOBAL_BB_HighPoint[k][i] = -inf;
            }
            array_1d<double, 3> rHighPoint;
            array_1d<double, 3> rLowPoint;
            #pragma omp for private(rHighPoint,rLowPoint)
            for (int c = 0; c < (int)conditions_bins.size(); c++) {
                Cond_iter it = conditions_bins.begin() + c;
                const GeometryType& pGeometry = (*it)->GetGeometry();
                // Axis-aligned bounding box of this condition's geometry.
                noalias(rLowPoint)  = pGeometry[0];
                noalias(rHighPoint) = pGeometry[0];
                for(unsigned int point = 1; point < pGeometry.size(); point++ ) {
                    for(unsigned int i = 0; i < 3; i++ ) {
                        rHighPoint[i] = ( rHighPoint[i] < pGeometry[point][i] ) ? pGeometry[point][i] : rHighPoint[i];
                        rLowPoint[i]  = ( rLowPoint[i] > pGeometry[point][i] ) ? pGeometry[point][i] : rLowPoint[i];
                    }
                }
                // Keep the condition only if its box overlaps the DEM box.
                bool add = true;
                for(unsigned int i = 0; i < 3; i++) {
                    if(( rHighPoint[i] < DEM_BB_LowPoint[i] ) || ( rLowPoint[i] > DEM_BB_HighPoint[i] )) {
                        add = false;
                        break;
                    }
                }
                if(add) {
                    for(unsigned int i = 0; i < 3; i++ ) {
                        Vector_GLOBAL_BB_LowPoint[k][i]  = (Vector_GLOBAL_BB_LowPoint[k][i] > rLowPoint[i]) ? rLowPoint[i] : Vector_GLOBAL_BB_LowPoint[k][i];
                        Vector_GLOBAL_BB_HighPoint[k][i] = (Vector_GLOBAL_BB_HighPoint[k][i] < rHighPoint[i]) ? rHighPoint[i] : Vector_GLOBAL_BB_HighPoint[k][i];
                    }
                    Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].push_back(*it);
                }
            }//parallel for
        }//parallel omp
        //5. GATHER FEM ELEMENTS AND THE GLOBAL BBX
        int fem_total_size = 0;
        for(int k = 0; k < num_of_threads; k++) {
            fem_total_size += Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].size();
        }
        BinsConditionPointerToGeometricalObjecPointerTemporalVector.reserve(fem_total_size);
        for(int k = 0; k < num_of_threads; k++) {
            BinsConditionPointerToGeometricalObjecPointerTemporalVector.insert(
                BinsConditionPointerToGeometricalObjecPointerTemporalVector.end(),
                Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].begin(),
                Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].end()
            );
            for(std::size_t i = 0; i < 3; i++) {
                mGlobal_BB_LowPoint[i]  = (mGlobal_BB_LowPoint[i] > Vector_GLOBAL_BB_LowPoint[k][i]) ? Vector_GLOBAL_BB_LowPoint[k][i] : mGlobal_BB_LowPoint[i];
                mGlobal_BB_HighPoint[i] = (mGlobal_BB_HighPoint[i] < Vector_GLOBAL_BB_HighPoint[k][i]) ? Vector_GLOBAL_BB_HighPoint[k][i] : mGlobal_BB_HighPoint[i];
            }
        }
        if(BinsConditionPointerToGeometricalObjecPointerTemporalVector.size() >0 ) {
            //6. CREATE THE BINS
            // Rebuild the bins from scratch; delete the previous search's bins
            // first (delete on NULL is a safe no-op on the first call).
            delete mBins;
            mBins = new GeometricalBinsType(BinsConditionPointerToGeometricalObjecPointerTemporalVector.begin(), BinsConditionPointerToGeometricalObjecPointerTemporalVector.end());
            //7. PERFORM THE SEARCH ON THE SPHERES
            #pragma omp parallel
            {
                GeometricalObjectType::ContainerType  localResults(MaxNumberOfElements);
                DistanceType                          localResultsDistances(MaxNumberOfElements);
                std::size_t                           NumberOfResults = 0;
                #pragma omp for schedule(dynamic, 100)
                for (int p = 0; p < (int)elements_sear.size(); p++) {
                    Elem_iter it = elements_sear.begin() + p;
                    GeometricalObject::Pointer go_it(*it);
                    bool search_particle = true;
                    array_1d<double, 3 > & aux_coor = go_it->GetGeometry()[0].Coordinates();
                    SphericParticle* p_particle = dynamic_cast<SphericParticle*>((*it).get());
                    double Rad = p_particle->GetSearchRadius();
                    for(unsigned int i = 0; i < 3; i++ ) {
                        search_particle &= !(aux_coor[i] < (mGlobal_BB_LowPoint[i] - Rad) ) || (aux_coor[i] > (mGlobal_BB_HighPoint[i] + Rad) ); //amplify the BBX with the radius for every particle
                    }
                    if(search_particle) {
                        auto ResultsPointer = localResults.begin();
                        DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
                        NumberOfResults = (*mBins).SearchObjectsInRadiusExclusive(go_it,Rad,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
                        rResults[p].reserve(NumberOfResults);
                        for(auto c_it = localResults.begin(); c_it != localResults.begin() + NumberOfResults; c_it++) {
                            auto presult = *c_it;
                            Condition::Pointer condition = dynamic_pointer_cast<Condition>(presult);
                            rResults[p].push_back(condition);
                        }
                        rResultsDistance[p].insert(rResultsDistance[p].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
                    }
                } //loop elements
            } //parallel
        }//if Bins is not empty--> search
        KRATOS_CATCH("")
    }
    /// High corner of the global (FEM) bounding box from the last search.
    array_1d<double, 3 > GetBBHighPoint() {
        return (mGlobal_BB_HighPoint);
    }
    /// Low corner of the global (FEM) bounding box from the last search.
    array_1d<double, 3 > GetBBLowPoint() {
        return (mGlobal_BB_LowPoint);
    }
    /// Turn back information as a string.
    virtual std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "DEM_FEM_Search" ;
        return buffer.str();
    }
    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const override {rOStream << "DEM_FEM_Search";}
    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const override {}
    ///@}
    ///@name Friends
    ///@{
    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{
    ///@}
    ///@name Protected member Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{
    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}
private:
    array_1d<double, 3 > mGlobal_BB_HighPoint;   // FEM bounding box, high corner
    array_1d<double, 3 > mGlobal_BB_LowPoint;    // FEM bounding box, low corner
    array_1d<double, 3 > DEM_BB_HighPoint;       // DEM bounding box, high corner
    array_1d<double, 3 > DEM_BB_LowPoint;        // DEM bounding box, low corner
    GeometricalBinsType* mBins;                  // owned; rebuilt on every search
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{
    /// Assignment operator (private, degenerate: copies nothing).
    DEM_FEM_Search& operator=(DEM_FEM_Search const& rOther)
    {
        return *this;
    }
    /// Copy constructor (private). mBins is initialized to NULL so a copy can
    /// never alias -- and the destructor never double-deletes -- the bins of
    /// the source object (operator= above intentionally copies nothing, which
    /// previously left mBins uninitialized in the copy).
    DEM_FEM_Search(DEM_FEM_Search const& rOther)
        : mBins(NULL)
    {
        *this = rOther;
    }
}; // Class DEM_FEMSearch
} // namespace Kratos.
#endif // KRATOS_DEM_FEM_SEARCH_H_INCLUDED defined
|
shuffle.c | // TODO: see http://arxiv.org/pdf/1508.03167.pdf
// https://github.com/axel-bacher/mergeshuffle
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <string.h>
#include <sys/types.h>
#include <stdint.h>
#include <x86intrin.h>
#define RDTSC_START(cycles) \
do { \
register unsigned cyc_high, cyc_low; \
__asm volatile("cpuid\n\t" \
"rdtsc\n\t" \
"mov %%edx, %0\n\t" \
"mov %%eax, %1\n\t" \
: "=r" (cyc_high), "=r" (cyc_low) \
:: "%rax", "%rbx", "%rcx", "%rdx"); \
(cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
} while (0)
#define RDTSC_FINAL(cycles) \
do { \
register unsigned cyc_high, cyc_low; \
__asm volatile("rdtscp\n\t" \
"mov %%edx, %0\n\t" \
"mov %%eax, %1\n\t" \
"cpuid\n\t" \
: "=r" (cyc_high), "=r" (cyc_low) \
:: "%rax", "%rbx", "%rcx", "%rdx"); \
(cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \
} while(0)
/********
* Next part is mersenne Twister
*********/
#include "mersenne.c"
/**************
* next bit is from
* https://github.com/axel-bacher/mergeshuffle/
****/
// Buffered source of random bits: x holds up to 64 unconsumed random bits,
// c counts how many of them are still valid.
struct random {
    unsigned long x;
    int c;
};
// mark as used the first b bits of r->x
// they should be shifted out after use (r->x >>= b)
static inline void consume_bits(struct random *r, int b) {
#ifdef COUNT
    count += b;   // optional global tally of consumed random bits
#endif
    r->c -= b;
    if(r->c < 0) {
        // Fewer than b valid bits remained: refill the 64-bit buffer.
        // NOTE(review): the leftover bits that straddle the refill are simply
        // discarded rather than spliced together -- harmless for uniformity,
        // just slightly wasteful.
        r->x = rand64();
        r->c = 64 - b;
    }
}
// Return i random bits (as the low bits of the result), taken from the
// buffered word in r; consume_bits refills the buffer when needed.
static inline unsigned long random_bits(struct random *r, unsigned int i) {
    consume_bits(r, i);
    // The extracted value must be held in an unsigned long: the previous
    // 'int y' truncated the result for i > 31 and misread the top bit at
    // i == 32, even though the function returns unsigned long.
    unsigned long y = r->x & ((1UL << i) - 1);
    r->x >>= i;
    return y;
}
// One fair coin flip: returns 0 or 1 from the buffered bit source.
static inline int flip(struct random *r) {
    return random_bits(r, 1);
}
// Draw a uniform integer in [0, n) one random bit at a time.
// v tracks how many equally likely states have been accumulated and d the
// candidate value; whenever v >= n a verdict is possible: accept d if d < n,
// otherwise subtract n from both and keep drawing, so no entropy is wasted.
// NOTE(review): this matches Lumbroso's "Fast Dice Roller" construction --
// confirm against the mergeshuffle upstream if exact provenance matters.
static inline unsigned long random_int(struct random *r, unsigned long n) {
    unsigned long v = 1;   // number of equiprobable outcomes represented
    unsigned long d = 0;   // candidate result, built bit by bit
    while(1) {
        d += d + flip(r);
        v += v;
        if(v >= n) {
            if(d < n) return d;
            v -= n;
            d -= n;
        }
    }
}
// Exchange the values pointed to by a and b.
static inline void swap(unsigned int *a, unsigned int *b) {
    unsigned int tmp = *a;
    *a = *b;
    *b = tmp;
}
void fisher_yates(unsigned int *t, unsigned int n) {
struct random r = {0, 0};
unsigned int i;
for(i = 0; i < n; i ++) {
unsigned int j = random_int(&r, i + 1);
swap(t + i, t + j);
}
}
// Merge step of MergeShuffle: t[0..m) and t[m..n) are each already uniformly
// shuffled; interleave them by coin flips until one half is exhausted, then
// finish placing the remaining elements with Fisher-Yates swaps.
void merge(unsigned int *t, unsigned int m, unsigned int n) {
    unsigned int *u = t;       // next unplaced element of the first half
    unsigned int *v = t + m;   // next unplaced element of the second half
    unsigned int *w = t + n;   // one past the end of the whole range
    struct random r = {0, 0};
    // take elements from both halves until one is exhausted
    while(1) {
        if(flip(&r)) {
            if(v == w) break;   // second half exhausted
            swap(u, v ++);      // pull the next element from the second half
        } else if(u == v) break; // first half exhausted
        u ++;
    }
    // finish using Fisher-Yates
    while(u < w) {
        unsigned int i = random_int(&r, u - t + 1);
        swap(t + i, u ++);
    }
}
// Blocks at or below this size are shuffled directly with Fisher-Yates.
unsigned long cutoff = 0x10000;
// MergeShuffle (Bacher et al., http://arxiv.org/pdf/1508.03167.pdf):
// split t[0..n) into q = 2^c roughly equal blocks, shuffle each block
// independently, then merge adjacent blocks pairwise in log2(q) rounds.
void mergeshuffle(unsigned int *t, unsigned int n) {
    unsigned int c = 0;
    while((n >> c) > cutoff) c ++;   // pick c so each block is <= cutoff
    unsigned int q = 1 << c;
    unsigned long nn = n;            // widen so nn * i below cannot overflow
    unsigned int i,p;
    // #pragma omp parallel for
    for(i = 0; i < q; i ++) {
        // Block i spans [nn*i >> c, nn*(i+1) >> c).
        unsigned long j = nn * i >> c;
        unsigned long k = nn * (i+1) >> c;
        fisher_yates(t + j, k - j);
    }
    for(p = 1; p < q; p += p) {
        // Round p: merge adjacent runs of p blocks into runs of 2p blocks.
        // #pragma omp parallel for
        for(i = 0; i < q; i += 2*p) {
            unsigned long j = nn * i >> c;
            unsigned long k = nn * (i + p) >> c;
            unsigned long l = nn * (i + 2*p) >> c;
            merge(t + j, k - j, l - j);
        }
    }
}
/****
* End of import from
* https://github.com/axel-bacher/mergeshuffle/
**************/
// Round v up to the next power of two (returns v unchanged if it already is
// one). round2(0) wraps to 0, as does any v above 2^31 -- callers here pass
// small positive block counts.
uint32_t round2 (uint32_t v) {
    uint32_t m = v - 1;
    int s;
    // Smear the highest set bit of m into every lower position.
    for (s = 1; s < 32; s <<= 1) {
        m |= m >> s;
    }
    return m + 1;
}
// Uniform integer in [0, size) by modulo reduction with rejection of the
// biased tail of fastrand()'s range.
// NOTE(review): the rejection bound uses RAND_MAX, which describes rand(),
// not necessarily fastrand(); if fastrand() produces full 32-bit values the
// bound is wrong and a slight bias remains -- confirm fastrand()'s actual
// maximum in mersenne.c.
uint32_t fairRandomInt(uint32_t size) {
    uint32_t candidate, rkey;
    // such a loop is necessary for the result to be fair
    rkey = fastrand();
    candidate = rkey % size;
    // NOTE: RAND_MAX should be actual max value
    while(rkey - candidate > RAND_MAX - size + 1 ) { // will be predicted as false
        rkey = fastrand();
        candidate = rkey % size;
    }
    return candidate;
}
// Fisher-Yates shuffle of an int array: walk from the back, swapping each
// position with a uniformly chosen earlier (or equal) position.
void shuffle(int *storage, size_t size) {
    size_t remaining;
    for (remaining = size; remaining > 1; remaining--) {
        const size_t pick = fairRandomInt(remaining);
        const int last = storage[remaining - 1];   // likely in cache
        const int chosen = storage[pick];          // could be a costly random access
        storage[remaining - 1] = chosen;
        storage[pick] = last;
    }
}
// Round v up to the next power of two using a leading-zero count.
// Defined only for v <= 2^31 (the result would not fit otherwise).
uint32_t fastround2 (uint32_t v) {
    // __builtin_clz(0) is undefined behavior, so v == 0 and v == 1 must be
    // handled explicitly; both round up to 1.
    if (v <= 1) return 1;
    return 1 << (32 - __builtin_clz(v-1));
}
// Uniform integer in [0, size): grab bused bits (mask = 2^bused - 1) from the
// random-bit buffer and reject candidates >= size.
// NOTE(review): callers must keep mask equal to the smallest all-ones value
// covering size-1 so the rejection probability stays below 1/2.
uint32_t fastFairRandomInt(randbuf_t * rb, uint32_t size, uint32_t mask, uint32_t bused) {
    uint32_t candidate;
    candidate = grabBits( rb, mask,bused );
    // such a loop is necessary for the result to be fair
    while(candidate >= size) {
        candidate = grabBits( rb, mask,bused );
    }
    return candidate;
}
// Lemire-style ranged generation: map a 32-bit random word into [0, range)
// with a single 64-bit multiply, computing the rejection threshold lazily --
// only when the low half of the product lands in the possibly-biased zone.
uint32_t ranged_random_mult_lazy(uint32_t range) {
    uint64_t random32bit, multiresult;
    uint32_t leftover;
    uint32_t threshold;
    random32bit = fastrand();
    if((range & (range - 1)) == 0) {
        // Power of two: masking is already exact, no rejection needed.
        return random32bit & (range - 1);
    }
    if(range >0x80000000) {// if range > 1<<31
        // The multiply-shift trick cannot help here; plain rejection
        // terminates quickly because more than half of all 32-bit words
        // already fall below range.
        while(random32bit >= range) {
            random32bit = fastrand();
        }
        return random32bit; // [0, range)
    }
    multiresult = random32bit * range;
    leftover = (uint32_t) multiresult;
    if(leftover < range ) {
        // threshold = (2^32 - 1) % range. Rejecting leftover <= threshold is
        // equivalent to Lemire's leftover < 2^32 % range, since range is not
        // a power of two on this path.
        threshold = 0xFFFFFFFF % range ;
        while (leftover <= threshold) {
            random32bit = fastrand();
            multiresult = random32bit * range;
            leftover = (uint32_t) multiresult;
        }
    }
    return multiresult >> 32; // [0, range)
}
// Fisher-Yates shuffle of an int array, drawing indices with the
// multiply-shift ranged generator.
void shuffle_fastmult(int *storage, uint32_t size) {
    uint32_t remaining;
    for (remaining = size; remaining > 1; remaining--) {
        const uint32_t pick = ranged_random_mult_lazy(remaining);
        const int last = storage[remaining - 1];   // likely in cache
        const int chosen = storage[pick];          // could be a costly random access
        storage[remaining - 1] = chosen;
        storage[pick] = last;
    }
}
// Fisher-Yates shuffle, shuffling an array of integers
// Variant that reuses one power-of-two bit width for a whole run of loop
// indices: while 2*i > m2 the mask m2-1 still covers i, so random bits can be
// drawn in fixed-width chunks; the mask is then halved and the width reduced.
void oldfast_shuffle(int *storage, size_t size) {
    size_t i;
    uint32_t bused = 32 - __builtin_clz(size);        // bit width needed for size
    uint32_t m2 = 1 << (32- __builtin_clz(size-1));   // power of two >= size
    i=size;
    randbuf_t rb;
    rbinit(&rb);
    while(i>1) {
        // All indices i in (m2/2, m2] share the same mask m2-1.
        for (; 2*i>m2; i--) {
            size_t nextpos = fastFairRandomInt(&rb, i, m2-1,bused);//
            int tmp = storage[i - 1];// likely in cache
            int val = storage[nextpos]; // could be costly
            storage[i - 1] = val;
            storage[nextpos] = tmp; // you might have to read this store later
        }
        m2 = m2 >> 1;
        bused--;
    }
}
// Fisher-Yates shuffle driven by standard_ranged() for index generation.
void fast_shuffle(int *storage, size_t size) {
    size_t remaining;
    for (remaining = size; remaining > 1; remaining--) {
        const uint32_t pick = standard_ranged(remaining);
        const int last = storage[remaining - 1];   // likely in cache
        const int chosen = storage[pick];          // could be a costly random access
        storage[remaining - 1] = chosen;
        storage[pick] = last;
    }
}
// Fisher-Yates shuffle driven by standard_ranged2() for index generation.
void fast_shuffle2(int *storage, size_t size) {
    size_t remaining;
    for (remaining = size; remaining > 1; remaining--) {
        const uint32_t pick = standard_ranged2(remaining);
        const int last = storage[remaining - 1];   // likely in cache
        const int chosen = storage[pick];          // could be a costly random access
        storage[remaining - 1] = chosen;
        storage[pick] = last;
    }
}
// Fisher-Yates shuffle of an int array; indices are generated by scaling a
// uniform double in [0, 1) and truncating.
void shuffle_float(int *storage, uint32_t size) {
    uint32_t remaining;
    for (remaining = size; remaining > 1; remaining--) {
        const uint32_t pick = (uint32_t)(fastranddouble() * remaining);
        const int last = storage[remaining - 1];   // likely in cache
        const int chosen = storage[pick];          // could be a costly random access
        storage[remaining - 1] = chosen;
        storage[pick] = last;
    }
}
// Fisher-Yates shuffle, shuffling an array of integers
/**
supposedly, you can map a 64-bit random int v to a double by doing this:
v * (1.0/18446744073709551616.0L);
so to get a number between 0 and x, you just multiply this by x?
But this is bogus.
* */
// Deliberately approximate variant kept for benchmarking: the 64-bit-to-
// double mapping above loses low bits and is not exactly uniform.
void fast_shuffle_floatapprox(int *storage, size_t size) {
    size_t i;
    for(i=size; i>1; i--) {
        // Scale a 64-bit word into [0, i) via long double arithmetic.
        uint32_t nextpos = (uint32_t)(fastrand64() * i * (1.0/18446744073709551616.0L));//
        int tmp = storage[i - 1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
// Random Permutations on Distributed External and Hierarchical Memory by Peter Sanders
// Two-pass cache-friendly shuffle: scatter the input into NBR_BLOCK buckets
// chosen at random, then shuffle each bucket independently and concatenate.
void shuffle_sanders(int *storage, size_t size, size_t buffersize) {
    size_t i;
    size_t l;
    size_t NBR_BLOCK = round2((size +buffersize -1)/buffersize);  // power of two
    size_t BLOCK_SIZE = 4 * buffersize;   // 4x slack so buckets rarely fill up
    size_t * counter = malloc(NBR_BLOCK * sizeof(size_t));
    int* buffer = malloc(BLOCK_SIZE * NBR_BLOCK * sizeof(int));
    if (counter == NULL || buffer == NULL) {
        // Allocation failure was previously unchecked; bail out with the
        // input untouched (free(NULL) is a no-op).
        free(counter);
        free(buffer);
        return;
    }
    for(i = 0 ; i < NBR_BLOCK; ++i)
        counter[i] = 0;
    for(i = 0; i < size; i++) {
        size_t block = rand() & ( NBR_BLOCK - 1) ; // NBR_BLOCK is a power of two
        if(counter[block] >= BLOCK_SIZE) {
            // Bucket full. The old code warned but still wrote past the end
            // of the bucket; instead, spill into the next bucket with room
            // (total capacity is >= 4*size, so one always exists).
            printf("insufficient mem\n");
            do {
                block = (block + 1) & ( NBR_BLOCK - 1);
            } while(counter[block] >= BLOCK_SIZE);
        }
        buffer[BLOCK_SIZE * block + counter[block]] = storage[i];
        counter[block]++;
    }
    // Shuffle every bucket and write them back contiguously.
    l = 0;
    for(i = 0; i < NBR_BLOCK; i++) {
        shuffle(buffer + BLOCK_SIZE * i, counter[i]);
        memcpy(storage + l,buffer + BLOCK_SIZE * i,counter[i]*sizeof(int));
        l += counter[i];
    }
    free(buffer);
    free(counter);
}
//Fisher-Yates shuffle, shuffling an array of integers
// Variant that draws each random index two iterations early and prefetches
// the target cache line, hiding the latency of the random memory access.
void shuffle_prefetch2(int *storage, size_t size) {
    size_t i;
    i = size;
    if(size > 2) {
        size_t nextposs[2];   // ring buffer of pre-drawn indices, keyed by i%2
        nextposs[(i)%2] = fairRandomInt(i);
        nextposs[(i-1)%2] = fairRandomInt(i - 1);
        for (; i>2; i--) {
            int thisindex = i % 2;
            size_t nextpos = nextposs[thisindex];   // index drawn two steps ago
            // prefetching part
            size_t np = fairRandomInt(i-2);         // index for iteration i-2
            nextposs[thisindex] = np;
            __builtin_prefetch(storage + np);
            // end of prefetching
            int tmp = storage[i-1];// likely in cache
            int val = storage[nextpos]; // could be costly
            storage[i - 1] = val;
            storage[nextpos] = tmp; // you might have to read this store later
        }
    }
    // Drain: last positions (or the whole array when size <= 2), no prefetch.
    for (; i>1; i--) {
        size_t nextpos = fairRandomInt(i);
        int tmp = storage[i-1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
//Fisher-Yates shuffle, shuffling an array of integers
// Same as shuffle_prefetch2 but with a four-deep pipeline of pre-drawn
// indices, prefetching the swap target four iterations ahead.
void shuffle_prefetch4(int *storage, size_t size) {
    size_t i;
    i = size;
    if(size > 4) {
        size_t nextposs[4];   // ring buffer of pre-drawn indices, keyed by i%4
        nextposs[i%4] = fairRandomInt(i);
        nextposs[(i-1)%4] = fairRandomInt(i - 1);
        nextposs[(i-2)%4] = fairRandomInt(i - 2);
        nextposs[(i-3)%4] = fairRandomInt(i - 3);
        for (; i>4; i--) {
            int thisindex = i % 4;
            size_t nextpos = nextposs[thisindex];   // index drawn four steps ago
            // prefetching part
            size_t np = fairRandomInt(i-4);         // index for iteration i-4
            nextposs[thisindex] = np;
            __builtin_prefetch(storage + np);
            // end of prefetching
            int tmp = storage[i-1];// likely in cache
            int val = storage[nextpos]; // could be costly
            storage[i - 1] = val;
            storage[nextpos] = tmp; // you might have to read this store later
        }
    }
    // Drain: remaining positions without prefetch.
    for (; i>1; i--) {
        size_t nextpos = fairRandomInt(i);
        int tmp = storage[i-1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
//Fisher-Yates shuffle, shuffling an array of integers
// Batched variant: draws four indices, reads all four targets, then performs
// all four writes.
// NOTE(review): because every read happens before any write, a later write to
// storage[i-2..i-4] can overwrite an earlier storage[nextposs[k]] store when
// the drawn indices collide, so the result can deviate from sequential
// Fisher-Yates -- confirm whether this approximation is intentional for the
// benchmark.
void shuffle4(int *storage, size_t size) {
    size_t i;
    i = size;
    if(size > 4) {
        size_t nextposs[4];   // the four drawn indices for this batch
        int vals[4];          // values read from those positions
        for (; i>4; i-=4) {
            nextposs[0] = fairRandomInt(i);
            nextposs[1] = fairRandomInt(i - 1);
            nextposs[2] = fairRandomInt(i - 2);
            nextposs[3] = fairRandomInt(i - 3);
            vals[0] = storage[nextposs[0]];
            vals[1] = storage[nextposs[1]];
            vals[2] = storage[nextposs[2]];
            vals[3] = storage[nextposs[3]];
            storage[nextposs[0]] = storage[i - 1];
            storage[nextposs[1]] = storage[i - 2];
            storage[nextposs[2]] = storage[i - 3];
            storage[nextposs[3]] = storage[i - 4];
            storage[i - 1] = vals[0];
            storage[i - 2] = vals[1];
            storage[i - 3] = vals[2];
            storage[i - 4] = vals[3];
        }
    }
    // Drain: remaining positions, one swap at a time.
    for (; i>1; i--) {
        size_t nextpos = fairRandomInt(i);
        int tmp = storage[i-1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
//Fisher-Yates shuffle, shuffling an array of integers
// Same prefetching scheme as shuffle_prefetch2/4 with an eight-deep pipeline.
void shuffle_prefetch8(int *storage, size_t size) {
    size_t i,j ;
    i = size;
    if(size > 8) {
        size_t nextposs[8];   // ring buffer of pre-drawn indices, keyed by i%8
        for(j = 0; j < 8; ++j)
            nextposs[(i-j)%8] = fairRandomInt(i-j);
        for (; i>8; i--) {
            int thisindex = i % 8;
            size_t nextpos = nextposs[thisindex];   // index drawn eight steps ago
            // prefetching part
            size_t np = fairRandomInt(i-8);         // index for iteration i-8
            nextposs[thisindex] = np;
            __builtin_prefetch(storage + np);
            // end of prefetching
            int tmp = storage[i-1];// likely in cache
            int val = storage[nextpos]; // could be costly
            storage[i - 1] = val;
            storage[nextpos] = tmp; // you might have to read this store later
        }
    }
    // Drain: remaining positions without prefetch.
    for (; i>1; i--) {
        size_t nextpos = fairRandomInt(i);
        int tmp = storage[i-1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
//Fisher-Yates shuffle, shuffling an array of integers
// Same prefetching scheme with a sixteen-deep pipeline of pre-drawn indices.
void shuffle_prefetch16(int *storage, size_t size) {
    size_t i,j;
    i = size;
    if(size > 16) {
        size_t nextposs[16];  // ring buffer of pre-drawn indices, keyed by i%16
        for(j = 0; j < 16; ++j)
            nextposs[(i-j)%16] = fairRandomInt(i-j);
        for (; i>16; i--) {
            int thisindex = i % 16;
            size_t nextpos = nextposs[thisindex];   // index drawn 16 steps ago
            // prefetching part
            size_t np = fairRandomInt(i-16);        // index for iteration i-16
            nextposs[thisindex] = np;
            __builtin_prefetch(storage + np);
            // end of prefetching
            int tmp = storage[i-1];// likely in cache
            int val = storage[nextpos]; // could be costly
            storage[i - 1] = val;
            storage[nextpos] = tmp; // you might have to read this store later
        }
    }
    // Drain: remaining positions without prefetch.
    for (; i>1; i--) {
        size_t nextpos = fairRandomInt(i);
        int tmp = storage[i-1];// likely in cache
        int val = storage[nextpos]; // could be costly
        storage[i - 1] = val;
        storage[nextpos] = tmp; // you might have to read this store later
    }
}
// Random Permutations on Distributed External and Hierarchical Memory by Peter Sanders
// Same two-pass bucket shuffle as shuffle_sanders, but each bucket is
// shuffled with the 16-deep prefetching Fisher-Yates variant.
void shuffle_sanders_prefetch16(int *storage, size_t size, size_t buffersize) {
    size_t i;
    size_t l;
    size_t NBR_BLOCK = round2((size +buffersize -1)/buffersize);  // power of two
    size_t BLOCK_SIZE = 4 * buffersize;   // 4x slack so buckets rarely fill up
    size_t * counter = malloc(NBR_BLOCK * sizeof(size_t));
    int* buffer = malloc(BLOCK_SIZE * NBR_BLOCK * sizeof(int));
    if (counter == NULL || buffer == NULL) {
        // Allocation failure was previously unchecked; bail out with the
        // input untouched (free(NULL) is a no-op).
        free(counter);
        free(buffer);
        return;
    }
    for(i = 0 ; i < NBR_BLOCK; ++i)
        counter[i] = 0;
    for(i = 0; i < size; i++) {
        size_t block = rand() & ( NBR_BLOCK - 1) ; // NBR_BLOCK is a power of two
        if(counter[block] >= BLOCK_SIZE) {
            // Bucket full. The old code warned but still wrote past the end
            // of the bucket; instead, spill into the next bucket with room
            // (total capacity is >= 4*size, so one always exists).
            printf("insufficient mem\n");
            do {
                block = (block + 1) & ( NBR_BLOCK - 1);
            } while(counter[block] >= BLOCK_SIZE);
        }
        buffer[BLOCK_SIZE * block + counter[block]] = storage[i];
        counter[block]++;
    }
    // Shuffle every bucket and write them back contiguously.
    l = 0;
    for(i = 0; i < NBR_BLOCK; i++) {
        shuffle_prefetch16(buffer + BLOCK_SIZE * i, counter[i]);
        memcpy(storage + l,buffer + BLOCK_SIZE * i,counter[i]*sizeof(int));
        l += counter[i];
    }
    free(buffer);
    free(counter);
}
// qsort comparator for int: returns -1, 0 or 1.
int compare( const void* a, const void* b)
{
    const int lhs = *(const int *) a;
    const int rhs = *(const int *) b;
    return (lhs > rhs) - (lhs < rhs);
}
// Run every shuffle implementation once over a fresh 0..n-1 array of the
// given size and print RDTSC cycles per element for each.
// Returns an accumulated "bogus" value so the work is not optimized away.
// NOTE(review): the array is NOT re-initialized between benchmarks, so each
// shuffle operates on the previous one's output -- fine for timing, but the
// runs are not independent. malloc is also unchecked here.
int demo(size_t array_size) {
    int bogus = 0;
    size_t i;
    float cycles_per_search1;
    int *array = (int *) malloc( array_size * sizeof(int) );
    uint64_t cycles_start, cycles_final;
#ifdef USE_GENERIC
    printf("Using some basic random number generator\n");
#elif USE_SIMDMT
    printf("Using SIMD Mersenne Twister\n");
#elif USE_MT
    printf("Using Mersenne Twister\n");
#elif USE_PCG
    printf("Using PCG\n");
#elif USE_RAND
    printf("Using rand\n");
#elif USE_HARDWARE
    printf("Using hardware\n");
#else
    printf("Using SIMD Mersenne Twister\n");
#endif
    printf("populating array %zu \n",array_size);
    printf("log(array_size) = %d \n",(33 - __builtin_clz(array_size-1)));
    for(i = 0; i < array_size; ++i) {
        array[i] = i;
    }
    // Reseed the scalar Mersenne Twister so runs are reproducible.
    // NOTE(review): x is presumably the seed global from mersenne.c -- confirm.
    x=1;
    seedMT(x);
    printf("\n");
    RDTSC_START(cycles_start);
    shuffle( array, array_size );
    bogus += array[0];   // consume a result so the call is not dead code
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("normal shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_float( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("normal shuffle with floating points cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    fast_shuffle( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("fast shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    fast_shuffle2( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("fast shuffle 2 cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_fastmult( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("fast mult shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    fisher_yates((unsigned int *) array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("from https://github.com/axel-bacher/mergeshuffle/ shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    mergeshuffle((unsigned int *) array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("from https://github.com/axel-bacher/mergeshuffle/ mergeshuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle4( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("shuffle4 cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_sanders( array, array_size, 24000 );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("sanders shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_prefetch2( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("prefetch 2 shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_prefetch4( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("prefetch 4 shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_prefetch8( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("prefetch 8 shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_prefetch16( array, array_size );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("prefetch 16 shuffle cycles per key %.2f \n", cycles_per_search1);
    RDTSC_START(cycles_start);
    shuffle_sanders_prefetch16( array, array_size, 24000 );
    bogus += array[0];
    RDTSC_FINAL(cycles_final);
    cycles_per_search1 =
        ( cycles_final - cycles_start) / (float) (array_size);
    printf("sanders with prefetch 16 shuffle cycles per key %.2f \n", cycles_per_search1);
    free(array);
    return bogus;
}
/* Sanity-check the RNG's rejection-sampling fairness.
 *
 * For each size in [100, maxsize), draw `count` masked random values and
 * count how many fall outside [0, size).  With m2 the smallest power of two
 * >= size, the expected miss rate is (1 - size/m2); if observed misses
 * exceed the prediction by more than 10% plus a small absolute slack, the
 * generator is declared poor.
 *
 * Returns 0 if the RNG looks fair, -1 otherwise.
 */
int testfairness(uint32_t maxsize) {
    printf("checking your RNG.\n");
    uint32_t size;
    for (size = 100; size < maxsize; ++size) {
        uint32_t i;
        // smallest power of two >= size; size >= 100 so size-1 != 0 and
        // __builtin_clz is well-defined.  NOTE(review): assumes size-1 < 2^31
        // so the shift count stays below 32 -- holds for all current callers.
        uint32_t m2 = 1 << (32 - __builtin_clz(size - 1));
        double ratio = (double) size / m2;
        uint32_t mask = m2 - 1;
        uint32_t count = 10000;
        double predicted = (1 - ratio) * count;
        uint32_t missed = 0;
        for (i = 0; i < count; ++i) {
            if ((fastrand() & mask) >= size) ++missed;
        }
        // +20 is absolute slack so tiny predictions don't false-alarm
        if ((double) missed > 1.1 * predicted + 20) {
            // %u (not %d): size and missed are uint32_t; the old mismatched
            // specifier was undefined behavior per C11 7.21.6.1p9
            printf("size = %u missed = %u predicted = %f\n", size, missed, predicted);
            printf("poor RNG \n");
            return -1;
        }
    }
    return 0;
}
// Benchmark driver: verifies the RNG fairness, then runs the shuffle
// benchmark suite over geometrically growing array sizes.  Returns an
// accumulated dummy value so the compiler cannot elide the benchmark work.
int main( int argc, char **argv ) {
int bogus = 0;
size_t array_size;
int r;
init_gen_rand(0); // simd mersenne
r = testfairness(1000);
if(r == 0)
printf ("good RNG\n");
else return r;
// NOTE(review): 2147483648 does not fit a 32-bit size_t; this loop assumes
// a 64-bit build -- confirm before porting to LP32 targets.
for(array_size = 4096; array_size < 2147483648; array_size*=8) {
bogus += demo(array_size);
printf("\n");
}
return bogus;
}
|
sections_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// Verifies -Wuninitialized fires inside an '#pragma omp sections' region.
// NOTE: the 'expected-*' comments are clang -verify assertions; never insert
// lines between a '@+N' directive and its target line.
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp sections
{
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
}
void foo();
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}}
#pragma omp sections
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}}
#pragma omp sections foo
void test_no_clause() {
int i;
#pragma omp sections
{
foo();
}
// expected-error@+2 {{the statement for '#pragma omp sections' must be a compound statement}}
#pragma omp sections
++i;
#pragma omp sections
{
foo();
foo(); // expected-error {{statement in 'omp sections' directive must be enclosed into a section region}}
}
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp sections
{
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
#pragma omp section
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L3;
else if (i == 8) {
L3:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
goto L3; // expected-error {{use of undeclared label 'L3'}}
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections foo bar
{
foo();
// expected-error@+1 {{unexpected OpenMP clause 'nowait' in directive '#pragma omp section'}}
#pragma omp section nowait
;
}
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections;
{
foo();
}
#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp sections'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections linear(x);
{
foo();
}
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections private(x);
{
foo();
}
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections, private(x);
{
foo();
}
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp sections private(
{
foo();
}
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections private(,
{
foo();
}
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections private(, )
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections private()
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections private(int)
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections private(0)
{
foo();
}
int x, y, z;
#pragma omp parallel
#pragma omp sections private(x)
{
foo();
}
#pragma omp parallel
#pragma omp sections private(x, y)
{
foo();
}
#pragma omp parallel
#pragma omp sections private(x, y, z)
{
foo();
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate(
{
foo();
}
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections lastprivate(,
{
foo();
}
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections lastprivate(, )
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate()
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate(int)
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections lastprivate(0)
{
foo();
}
int x, y, z;
#pragma omp parallel
#pragma omp sections lastprivate(x)
{
foo();
}
#pragma omp parallel
#pragma omp sections lastprivate(x, y)
{
foo();
}
#pragma omp parallel
#pragma omp sections lastprivate(x, y, z)
{
foo();
}
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate(
{
foo();
}
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections firstprivate(,
{
foo();
}
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections firstprivate(, )
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate()
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate(int)
{
foo();
}
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections firstprivate(0)
{
foo();
}
int x, y, z;
#pragma omp parallel
#pragma omp sections lastprivate(x) firstprivate(x)
{
foo();
}
#pragma omp parallel
#pragma omp sections lastprivate(x, y) firstprivate(x, y)
{
foo();
}
#pragma omp parallel
#pragma omp sections lastprivate(x, y, z) firstprivate(x, y, z)
{
foo();
}
}
// Checks that a duplicate 'nowait' clause on '#pragma omp sections' is
// rejected (same-line clang -verify assertion below).
void test_nowait() {
#pragma omp parallel
#pragma omp sections nowait nowait // expected-error {{directive '#pragma omp sections' cannot contain more than one 'nowait' clause}}
{
;
}
}
|
mujoco_derivatives_struct.c | // -*- evil-shift-width: 4 -*-
/* Copyright © 2018, Roboti LLC
This file is licensed under the MuJoCo Resource License (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.roboti.us/resourcelicense.txt
*/
#include "mujoco.h"
#include <assert.h>
#include <errno.h> /* errno, perror */
#include <math.h>
#include <stdio.h>
#include <stdlib.h> /* exit, EXIT_FAILURE -- was missing despite use below */
#include <string.h>
#include <sys/time.h>
#include "mujoco_derivatives_struct.h"
// global variables: user-defined, with defaults
#define MAXTHREAD 64 // maximum number of threads allowed
#define MAXSTATEN 8 // max allowed state members
// enable compilation with and without OpenMP support
#if defined(_OPENMP)
#include <omp.h>
#else
// omp timer replacement for serial (non-OpenMP) builds.
// Returns wall-clock time in seconds since the epoch; only differences of
// two calls are meaningful.  The original version filled a second, unused
// timeval via an extra gettimeofday syscall -- that dead code is removed.
double omp_get_wtime(void)
{
    struct timeval now;
    struct timezone tz;
    gettimeofday(&now, &tz);
    return (double) now.tv_sec + 1e-6 * (double) now.tv_usec;
}
// omp functions used below
void omp_set_dynamic(int x) {}
void omp_set_num_threads(int x) {}
int omp_get_num_procs(void) {return 1;}
#endif
// Allocate the 6*nv*nv derivative buffer:
// dinv/dpos, dinv/dvel, dinv/dacc, dacc/dpos, dacc/dvel, dacc/dfrc.
// Caller owns the buffer and frees it with mju_free.
mjtNum* alloc_deriv(const mjModel* m)
{
// allocate derivatives
return (mjtNum*) mju_malloc(6*sizeof(mjtNum)*m->nv*m->nv);
}
// Copy simulation time, state (qpos/qvel/qacc), warmstart, applied forces
// and controls from dmain into a thread-local mjData d, so each worker can
// perturb its own copy without touching the main simulation.
void mj_copyStateCtrlData(mjData* d, const mjModel* m, const mjData* dmain) {
d->time = dmain->time;
mju_copy(d->qpos, dmain->qpos, m->nq);
mju_copy(d->qvel, dmain->qvel, m->nv);
mju_copy(d->qacc, dmain->qacc, m->nv);
mju_copy(d->qacc_warmstart, dmain->qacc_warmstart, m->nv);
mju_copy(d->qfrc_applied, dmain->qfrc_applied, m->nv);
mju_copy(d->xfrc_applied, dmain->xfrc_applied, 6*m->nbody);
mju_copy(d->ctrl, dmain->ctrl, m->nu);
}
////////////////////////////////////////////////////////////////////////////////
// mjData Getters
////////////////////////////////////////////////////////////////////////////////
#define MJDATA_GET_PTR(attr) &mjd_get_ ## attr
#define MJDATA_GETTER(attr) \
mjtNum* mjd_get_ ## attr(const mjData* d) \
{ \
return d-> attr; \
}
MJDATA_GETTER(ctrl)
MJDATA_GETTER(qpos)
MJDATA_GETTER(qvel)
MJDATA_GETTER(qacc)
MJDATA_GETTER(qfrc_inverse)
MJDATA_GETTER(qfrc_applied)
// Release a mjDGetters container and its getter-pointer array
// (counterpart of mjDGetters_new).
void mjDGetters_free(mjDGetters* g)
{
mju_free(g->fs);
mju_free(g);
}
// Map an MJDGetter_enum tag to the corresponding mjData field-getter
// function.  Aborts the process on an out-of-range tag.
mjDGetter* mjd_enum2getter(const MJDGetter_enum mjdg)
{
    switch (mjdg)
    {
    case MJDGetter_ctrl:
        return MJDATA_GET_PTR(ctrl);
    case MJDGetter_qpos:
        return MJDATA_GET_PTR(qpos);
    case MJDGetter_qvel:
        return MJDATA_GET_PTR(qvel);
    case MJDGetter_qacc:
        return MJDATA_GET_PTR(qacc);
    case MJDGetter_qfrc_inverse:
        return MJDATA_GET_PTR(qfrc_inverse);
    case MJDGetter_qfrc_applied:
        return MJDATA_GET_PTR(qfrc_applied);
    default:
        // diagnostics belong on stderr, and the message was missing its
        // newline (stdout may also be unflushed at exit)
        fprintf(stderr, "Error: Bad enum %d\n", mjdg);
        exit(EXIT_FAILURE);
    }
}
// Reverse mapping of mjd_enum2getter: return the attribute name for a
// getter function pointer.  Aborts on an unknown pointer.  (Returns a
// string literal; callers must not modify or free it.)
char* mjd_getter2str(mjDGetter* mjdg)
{
    if (MJDATA_GET_PTR(ctrl) == mjdg)
        return "ctrl";
    else if (MJDATA_GET_PTR(qpos) == mjdg)
        return "qpos";
    else if (MJDATA_GET_PTR(qvel) == mjdg)
        return "qvel";
    else if (MJDATA_GET_PTR(qacc) == mjdg)
        return "qacc";
    else if (MJDATA_GET_PTR(qfrc_inverse) == mjdg)
        return "qfrc_inverse";
    else if (MJDATA_GET_PTR(qfrc_applied) == mjdg)
        return "qfrc_applied";
    else {
        // stderr + newline, consistent with mjd_enum2getter's error path
        fprintf(stderr, "Error: Bad getter %p\n", (void*) mjdg);
        exit(EXIT_FAILURE);
    }
}
// Print the attribute names held by a mjDGetters container as a
// comma-separated list, terminated by a newline.
void mjDGetters_print(const mjDGetters* mjdg)
{
for (size_t i = 0; i < mjdg->len; i++)
printf("%s %s", i ? "," : "", mjd_getter2str(mjdg->fs[i]));
printf("\n");
}
// Construct a mjDGetters container of `len` getter callbacks resolved from
// the given attribute tags.  Release with g->free (mjDGetters_free).
mjDGetters* mjDGetters_new(size_t len, MJDGetter_enum* attr)
{
mjDGetters* g = (mjDGetters*) mju_malloc(sizeof(mjDGetters));
g->len = len;
g->fs = (mjDGetter**) mju_malloc(len * sizeof(mjDGetter*));
for (size_t i = 0; i < len; i++)
g->fs[i] = mjd_enum2getter(attr[i]);
g->free = &mjDGetters_free;
return g;
}
// mj_warmup_t
// Forward-dynamics warmup: full mj_forward at the center point, then extra
// velocity-stage solver passes to refine the qacc warmstart.
void mj_warmup_fwd(const mjModel* m, mjData* d, int nwarmup) {
mj_forward(m, d);
// extra solver iterations to improve warmstart (qacc) at center point
for( int rep=1; rep<nwarmup; rep++ )
mj_forwardSkip(m, d, mjSTAGE_VEL, 1);
}
// mj_warmup_t
// Inverse-dynamics warmup; nwarmup is intentionally unused (inverse
// dynamics needs no warmstart refinement).
void mj_warmup_inv(const mjModel* m, mjData* d, int nwarmup) {
mj_inverse(m, d);
}
// Number of mjtNum entries needed for fN output blocks x xN input blocks of
// nv-by-nv derivative matrices.  Widen to size_t before multiplying so the
// product cannot overflow int arithmetic for large models.
size_t mjDeriv_deriv_size(const mjModel* m, int xN, int fN)
{
    size_t nv = (size_t) m->nv;
    return (size_t) fN * (size_t) xN * nv * nv;
}
// An implementation of mj_moveSkip
// Restores the saved center warmstart into d->qacc_warmstart, then runs
// forward dynamics skipping stages up to `stage`.  (The "Foward" spelling
// is kept: renaming would break external callers.)
void mj_warmFowardSkip(const mjModel* m,
mjData* d,
mjtStage stage,
int n,
mjtNum* warmstart)
{
mju_copy(d->qacc_warmstart, warmstart, m->nv);
mj_forwardSkip(m, d, stage, n);
}
// An implementation of mj_moveSkip
// Inverse-dynamics counterpart; `warmstart` is intentionally unused (kept
// only to match the mj_moveSkip signature).
void mj_invSkip(const mjModel* m,
mjData* d,
mjtStage stage,
int n,
mjtNum* warmstart)
{
mj_inverseSkip(m, d, stage, n);
}
// Perturb element i of input attribute xk by +eps in the thread-local
// mjData, re-evaluate dynamics via `move` (forward or inverse, skipping
// stages unaffected by this input), then restore the original value from
// dmain.  The evaluated outputs remain in d for the caller to difference.
void perturb_fwd_inv(mjDeriv* deriv,
int xk,
int threadid,
int i,
mj_moveSkip move)
{
mjtNum* warmstart = deriv->warmstart;
mjtStage stage = deriv->stages[xk];
mjtNum eps = deriv->eps;
const mjModel* m = deriv->m;
mjData* d = deriv->d[threadid];
// save output for center point and warmstart (needed in forward only)
mjtNum* target = deriv->xs->fs[xk](d);
const mjtNum originali = deriv->xs->fs[xk](deriv->dmain)[i];
// perturb selected target
target[i] += eps;
// move forward or backward by
(*move)(m, d, stage, 1, warmstart);
// undo perturbation
target[i] = originali;
}
// perturb_t interface
// Forward-dynamics perturber for non-position inputs (plain additive eps).
void perturb_fwd(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv(deriv, xk, threadid, i, &mj_warmFowardSkip);
}
// perturb_t interface
// Inverse-dynamics perturber for non-position inputs (plain additive eps).
void perturb_inv(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv(deriv, xk, threadid, i, &mj_invSkip);
}
// Position-space perturbation for dof i: ball and free-joint orientation
// dofs live on the quaternion manifold, so they are perturbed via
// mju_quatIntegrate with an eps angular velocity instead of a plain add.
// After evaluating dynamics through `move`, qpos is restored from dmain.
void perturb_fwd_inv_pos(mjDeriv* deriv,
int xk,
int threadid,
int i,
mj_moveSkip* move)
{
mjtNum* warmstart = deriv->warmstart;
mjtStage stage = deriv->stages[xk];
mjtNum eps = deriv->eps;
const mjModel* m = deriv->m;
mjData* d = deriv->d[threadid];
const mjData* dmain = deriv->dmain;
// get joint id for this dof
int jid = m->dof_jntid[i];
// get quaternion address and dof position within quaternion (-1: not in quaternion)
int quatadr = -1, dofpos = 0;
if( m->jnt_type[jid]==mjJNT_BALL )
{
quatadr = m->jnt_qposadr[jid];
dofpos = i - m->jnt_dofadr[jid];
}
else if( m->jnt_type[jid]==mjJNT_FREE && i>=m->jnt_dofadr[jid]+3 )
{
// rotational part of a free joint starts 3 dofs / 3 qpos past the origin
quatadr = m->jnt_qposadr[jid] + 3;
dofpos = i - m->jnt_dofadr[jid] - 3;
}
// apply quaternion or simple perturbation
if( quatadr>=0 )
{
mjtNum angvel[3] = {0,0,0};
angvel[dofpos] = eps;
mju_quatIntegrate(d->qpos+quatadr, angvel, 1);
}
else
d->qpos[m->jnt_qposadr[jid] + i - m->jnt_dofadr[jid]] += eps;
// evaluate dynamics, with center warmstart
move(m, d, stage, 1, warmstart);
// undo perturbation
mju_copy(d->qpos, dmain->qpos, m->nq);
}
// perturb_t interface
// Forward-dynamics perturber for qpos (quaternion-aware).
void perturb_fwd_pos(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv_pos(deriv, xk, threadid, i, &mj_warmFowardSkip);
}
// perturb_t interface
// Inverse-dynamics perturber for qpos (quaternion-aware).
void perturb_inv_pos(mjDeriv* deriv,
int xk,
int threadid,
int i)
{
perturb_fwd_inv_pos(deriv, xk, threadid, i, &mj_invSkip);
}
// Select the perturber callback for an input attribute: qpos needs the
// quaternion-aware variant, every other attribute gets the plain additive
// one; forward vs inverse dynamics picks the evaluation path.
perturb_t mjd_enum2perturber(MJDGetter_enum attr, int isforward)
{
    int is_pos = (attr == MJDGetter_qpos);
    if (isforward)
        return is_pos ? &perturb_fwd_pos : &perturb_fwd;
    return is_pos ? &perturb_inv_pos : &perturb_inv;
}
// Build an array of `len` perturber callbacks, one per input attribute tag.
// Caller releases it with mjPerturb_free.
perturb_t* mjPerturb_new(size_t len, MJDGetter_enum* attrs, int isforward)
{
    perturb_t* perturbers = (perturb_t*) mju_malloc(len * sizeof(perturb_t));
    // size_t index: the original int index compared signed vs unsigned
    for (size_t i = 0; i < len; i++)
        perturbers[i] = mjd_enum2perturber(attrs[i], isforward);
    return perturbers;
}
// Release a perturber array allocated by mjPerturb_new.
void mjPerturb_free(perturb_t* perturbers)
{
mju_free(perturbers);
}
// Which pipeline stages may be skipped after perturbing a given input:
// perturbing qpos invalidates everything (mjSTAGE_NONE), qvel preserves the
// position stage (mjSTAGE_POS), and any other input preserves both position
// and velocity stages (mjSTAGE_VEL).
mjtStage mjd_enum2stage(MJDGetter_enum attr)
{
    if (attr == MJDGetter_qpos)
        return mjSTAGE_NONE;
    if (attr == MJDGetter_qvel)
        return mjSTAGE_POS;
    return mjSTAGE_VEL;
}
// Build an array of `len` skip-stage codes, one per input attribute tag.
// Caller releases it with mjStages_free.
mjtStage* mjStages_new(size_t len, MJDGetter_enum* attrs)
{
    mjtStage* stages = (mjtStage*) mju_malloc(len * sizeof(mjtStage));
    // size_t index: the original int index compared signed vs unsigned
    for (size_t i = 0; i < len; i++)
        stages[i] = mjd_enum2stage(attrs[i]);
    return stages;
}
// Print `len` stage codes as comma-separated numeric values (mjtStage
// enumerators), terminated by a newline.
void mjStages_print(mjtStage* stages, size_t len)
{
for (size_t i = 0; i < len; i++)
printf("%s%d", i ? ", " : "", stages[i]);
printf("\n");
}
// Release a stage array allocated by mjStages_new.
void mjStages_free(mjtStage* stages)
{
mju_free(stages);
}
// Worker body (compute_t): computes this thread's statically scheduled share
// of finite-difference columns for every (output fk, input xk) pair and
// writes them into deriv->deriv.  Each thread perturbs its own mjData copy;
// the center evaluation and warmstart are saved once per output block.
void mjDeriv_compute(mjDeriv* deriv, int threadid)
{
mjData* d = deriv->d[threadid];
const mjModel* m = deriv->m;
const mjData* dmain = deriv->dmain;
mjDGetters* fs = deriv->fs;
mjDGetters* xs = deriv->xs;
int nthread = deriv->nthread;
mjtNum eps = deriv->eps;
int nv = m->nv;
// allocate stack space for result at center
mjMARKSTACK
mjtNum* center = mj_stackAlloc(d, nv);
mjtNum* warmstart = mj_stackAlloc(d, nv);
// prepare static schedule: range of derivative columns to be computed by this thread
int chunk = (m->nv + nthread-1) / nthread;
int istart = threadid * chunk;
int iend = mjMIN(istart + chunk, m->nv);
// copy state and control from dmain to thread-specific d
mj_copyStateCtrlData(d, m, dmain);
// run full computation at center point (usually faster than copying dmain)
(*deriv->warmer)(m, d, deriv->nwarmup);
for (int fk=0; fk < fs->len; fk++) {
// select target vector and original vector for force or acceleration derivative
mjtNum* output = fs->fs[fk](d); // d->qacc
// save output for center point and warmstart (needed in forward only)
mju_copy(center, output, nv);
mju_copy(warmstart, d->qacc_warmstart, nv);
for (int xk=0; xk < xs->len; xk++) {
deriv->warmstart = warmstart;
for( int i=istart; i<iend; i++ ) {
(deriv->perturbers[xk])(deriv, xk, threadid, i);
// compute column i of derivative 2
// NOTE: blocks are stored with inputs in REVERSE xk order --
// block index = fk*xs->len + (xs->len-xk-1) -- matching the
// dpos, dvel, dfrc layout expected by checkderiv/alloc_deriv
for( int j=0; j<nv; j++ ) {
size_t idx = (fk*xs->len + xs->len-xk-1)*nv*nv + j*nv + i;
deriv->deriv[idx] = (output[j] - center[j])/eps;
}
}
}
}
mjFREESTACK
}
// Debug dump of a mjDeriv configuration (sizes, getters, stages, solver
// parameters, warmup function identity) to stdout.
void mjDeriv_print(mjDeriv* mjd)
{
printf("mjd->m->nv: %d\n", mjd->m->nv);
printf("mjd->dmain->nefc: %d\n", mjd->dmain->nefc);
printf("mjd->deriv: %p\n", (void*) mjd->deriv);
printf("mjd->fs: "); mjDGetters_print(mjd->fs);
printf("mjd->xs: "); mjDGetters_print(mjd->xs);
printf("mjd->stages:"); mjStages_print(mjd->stages, mjd->xs->len);
printf("mjd->eps: %f\n", mjd->eps);
printf("mjd->nthread: %d\n", mjd->nthread);
printf("mjd->nwarmup: %d\n", mjd->nwarmup);
printf("mjd->warmer: %s\n", mjd->warmer == mj_warmup_fwd ? "fwd" :
(mjd->warmer == mj_warmup_inv ? "inv" : "error"));
}
// Run the derivative computation with one worker per thread, each on its
// own freshly allocated mjData.  The thread-local mjData array lives on
// this stack frame, so deriv->d is valid only for the duration of the call
// and is cleared before returning (the original left it dangling).
void mjDeriv_compute_mp(mjDeriv* deriv)
{
    int nthread = deriv->nthread;
    const mjModel* m = deriv->m;
    // guard the fixed-size scratch array below against overflow
    if (nthread < 1 || nthread > MAXTHREAD) {
        fprintf(stderr, "nthread must be between 1 and %d\n", MAXTHREAD);
        exit(EXIT_FAILURE);
    }
    // set up OpenMP (if not enabled, this does nothing)
    omp_set_dynamic(0);
    omp_set_num_threads(nthread);
    mjData* d[MAXTHREAD];
    for (int n = 0; n < nthread; n++) {
        d[n] = mj_makeData(m);
        if (d[n] == NULL) {
            perror("Unable to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
    deriv->d = d;   // points into this stack frame; cleared before returning
    // run worker threads in parallel if OpenMP is enabled
    #pragma omp parallel for schedule(static)
    for (int n = 0; n < nthread; n++) {
        (*deriv->compute)(deriv, n);
    }
    for (int n = 0; n < nthread; n++)
        mj_deleteData(d[n]);
    // BUGFIX: do not leave a dangling pointer to freed, stack-scoped data
    deriv->d = NULL;
}
// Release only the mjDeriv struct itself; caller-supplied members (fs, xs,
// perturbers, stages, deriv buffer) remain owned by the caller.
void mjDeriv_free(mjDeriv* mjderiv)
{
mju_free(mjderiv);
}
// Build a fully caller-specified mjDeriv.  The caller retains ownership of
// m, dmain, deriv, fs, xs, perturbers and stages; release the struct with
// its ->free (mjDeriv_free).  The original assigned xs and perturbers twice
// (first, wrongly, xs = fs); the duplicates are removed here.
mjDeriv* mjDeriv_new(const mjModel* m, const mjData* dmain, mjtNum* deriv,
                     mjDGetters* fs, mjDGetters* xs, perturb_t* perturbers,
                     mjtStage* stages,
                     mj_warmup_t* warmer, int nwarmup, double eps, int nthread)
{
    mjDeriv* mjd = (mjDeriv*) mju_malloc(sizeof(mjDeriv));
    mjd->m = m;
    mjd->dmain = dmain;
    mjd->deriv = deriv;
    mjd->fs = fs;
    mjd->xs = xs;
    mjd->perturbers = perturbers;
    mjd->stages = stages;
    mjd->warmer = warmer;
    mjd->nwarmup = nwarmup;
    mjd->eps = eps;
    mjd->nthread = nthread;
    mjd->compute = &mjDeriv_compute;
    mjd->compute_mp = &mjDeriv_compute_mp;
    mjd->free = &mjDeriv_free;
    return mjd;
}
// Destructor paired with mjDeriv_new_default: releases the internally
// allocated stages, perturbers and getter containers, then the struct.
// (The deriv output buffer is caller-owned and is NOT freed here.)
void mjDeriv_free_default(mjDeriv* mjderiv)
{
// mju_free(mjderiv->d);
mju_free(mjderiv->stages);
mju_free(mjderiv->perturbers);
(*mjderiv->xs->free)(mjderiv->xs);
(*mjderiv->fs->free)(mjderiv->fs);
mju_free(mjderiv);
}
// Convenience constructor for the standard 1-output x 3-input configuration:
// forward mode differentiates qacc w.r.t. (qfrc_applied, qvel, qpos);
// inverse mode differentiates qfrc_inverse w.r.t. (qacc, qvel, qpos).
// All internal containers are owned by the struct; release with ->free
// (mjDeriv_free_default).
mjDeriv* mjDeriv_new_default(const mjModel* m, const mjData* dmain, mjtNum* deriv,
int isforward, int nwarmup, double eps, int nthread)
{
size_t fN = 1;
size_t xN = 3;
mjDeriv* mjd = (mjDeriv*) mju_malloc(sizeof(mjDeriv));
mjd->m = m;
mjd->dmain = dmain;
mjd->deriv = deriv;
MJDGetter_enum fs_attr[1] = { MJDGetter_qacc };
if (isforward) {
fs_attr[0] = MJDGetter_qacc ;
} else {
fs_attr[0] = MJDGetter_qfrc_inverse;
}
mjDGetters* fs = mjDGetters_new(fN, fs_attr);
mjd->fs = fs;
// inputs ordered force/acc, velocity, position -- matching the stage codes
// assigned below (mjDeriv_compute stores blocks in reverse input order)
MJDGetter_enum xs_attr[3] = { MJDGetter_qfrc_applied, MJDGetter_qvel, MJDGetter_qpos };
if (isforward) {
xs_attr[0] = MJDGetter_qfrc_applied;
} else {
xs_attr[0] = MJDGetter_qacc;
}
mjDGetters* xs = mjDGetters_new(xN, xs_attr);
perturb_t* perturbers = (perturb_t*) mju_malloc(xN * sizeof(perturb_t));
if (isforward) {
perturbers[0] = &perturb_fwd;
perturbers[1] = &perturb_fwd;
perturbers[2] = &perturb_fwd_pos;
mjd->nwarmup = nwarmup;
mjd->warmer = &mj_warmup_fwd;
} else {
// inverse dynamics needs no warmstart refinement
perturbers[0] = &perturb_inv;
perturbers[1] = &perturb_inv;
perturbers[2] = &perturb_inv_pos;
mjd->nwarmup = 0;
mjd->warmer = &mj_warmup_inv;
}
mjd->xs = xs;
mjd->perturbers = perturbers;
mjd->stages = (mjtStage*) mju_malloc(xN * sizeof(mjtStage));
mjd->stages[0] = mjSTAGE_VEL;
mjd->stages[1] = mjSTAGE_POS;
mjd->stages[2] = mjSTAGE_NONE;
mjd->nthread = nthread;
// mjd->d = (mjData**) mju_malloc(nthread * sizeof(mjData*));
mjd->eps = eps;
mjd->compute = &mjDeriv_compute;
mjd->compute_mp = &mjDeriv_compute_mp;
mjd->free = &mjDeriv_free_default;
return mjd;
}
// log10 of the L1 norm of `residual` relative to the L1 norm of `base`.
// The denominator and the final ratio are both floored at mjMINVAL, so the
// result is defined even for zero vectors.
double relnorm(mjtNum* residual, mjtNum* base, int n)
{
    mjtNum res_sum = 0;
    mjtNum base_sum = 0;
    for (int k = 0; k < n; k++) {
        res_sum += mju_abs(residual[k]);
        base_sum += mju_abs(base[k]);
    }
    mjtNum ratio = mju_max(mjMINVAL, res_sum / mju_max(mjMINVAL, base_sum));
    return (double) mju_log10(ratio);
}
// names of residuals for accuracy check
const char* accuracy[8] = {
"G2*F2 - I ",
"G2 - G2' ",
"G1 - G1' ",
"F2 - F2' ",
"G1 + G2*F1",
"G0 + G2*F0",
"F1 + F2*G1",
"F0 + F2*G0"
};
// check accuracy of derivatives using known mathematical identities
// Fill error[0..7] with log10 relative L1 residuals of eight identities
// relating the inverse-dynamics blocks G0..G2 and forward blocks F0..F2
// (names listed in the `accuracy` table above).  A scratch mjData is made
// here only to obtain stack space for one nv*nv matrix.
void checkderiv(const mjModel* m, mjtNum* deriv, mjtNum error[8])
{
mjData* d = mj_makeData(m);
int nv = m->nv;
// allocate space
mjMARKSTACK
mjtNum* mat = mj_stackAlloc(d, nv*nv);
// get pointers to derivative matrices
mjtNum* G0 = deriv; // dinv/dpos
mjtNum* G1 = deriv + nv*nv; // dinv/dvel
mjtNum* G2 = deriv + 2*nv*nv; // dinv/dacc = dqfrc_inverse / dacc
mjtNum* F0 = deriv + 3*nv*nv; // dacc/dpos
mjtNum* F1 = deriv + 4*nv*nv; // dacc/dvel
mjtNum* F2 = deriv + 5*nv*nv; // dacc/dfrc = dacc / dfrc_applied
// G2*F2 - I
mju_mulMatMat(mat, G2, F2, nv, nv, nv);
for( int i=0; i<nv; i++ )
mat[i*(nv+1)] -= 1;
error[0] = relnorm(mat, G2, nv*nv);
// G2 - G2'
mju_transpose(mat, G2, nv, nv);
mju_sub(mat, mat, G2, nv*nv);
error[1] = relnorm(mat, G2, nv*nv);
// G1 - G1'
mju_transpose(mat, G1, nv, nv);
mju_sub(mat, mat, G1, nv*nv);
error[2] = relnorm(mat, G1, nv*nv);
// F2 - F2'
mju_transpose(mat, F2, nv, nv);
mju_sub(mat, mat, F2, nv*nv);
error[3] = relnorm(mat, F2, nv*nv);
// G1 + G2*F1
mju_mulMatMat(mat, G2, F1, nv, nv, nv);
mju_addTo(mat, G1, nv*nv);
error[4] = relnorm(mat, G1, nv*nv);
// G0 + G2*F0
mju_mulMatMat(mat, G2, F0, nv, nv, nv);
mju_addTo(mat, G0, nv*nv);
error[5] = relnorm(mat, G0, nv*nv);
// F1 + F2*G1
mju_mulMatMat(mat, F2, G1, nv, nv, nv);
mju_addTo(mat, F1, nv*nv);
error[6] = relnorm(mat, F1, nv*nv);
// F0 + F2*G0
mju_mulMatMat(mat, F2, G0, nv, nv, nv);
mju_addTo(mat, F0, nv*nv);
error[7] = relnorm(mat, F0, nv*nv);
mjFREESTACK
mj_deleteData(d);
}
// Benchmark and accuracy harness: loads a model, builds default inverse and
// forward mjDeriv objects sharing one 6*nv*nv buffer, runs nepoch timing
// epochs (nstep simulation steps each), and reports per-phase timing plus
// log10 residuals of the derivative identities.
int main(int argc, char** argv)
{
    // internal limits and defaults
    const int MAXEPOCH = 100;   // maximum number of timing epochs
    mjtNum* deriv = 0;          // dynamics derivatives (6*nv*nv):
                                // dinv/dpos, dinv/dvel, dinv/dacc,
                                // dacc/dpos, dacc/dvel, dacc/dfrc
    int nthread = 0;
    int niter = 30;             // fixed number of solver iterations for finite-differencing
    int nwarmup = 3;            // center point repetitions to improve warmstart
    int nepoch = 20;            // number of timing epochs
    int nstep = 500;            // number of simulation steps per epoch
    double eps = 1e-6;          // finite-difference epsilon

    // print help if not enough arguments
    if( argc<2 )
    {
        printf("\n Arguments: modelfile [nthread niter nwarmup nepoch nstep eps]\n\n");
        return 1;
    }

    // default nthread = number of logical cores (usually optimal)
    nthread = omp_get_num_procs();

    // get numeric command-line arguments
    if( argc>2 )
        sscanf(argv[2], "%d", &nthread);
    if( argc>3 )
        sscanf(argv[3], "%d", &niter);
    if( argc>4 )
        sscanf(argv[4], "%d", &nwarmup);
    if( argc>5 )
        sscanf(argv[5], "%d", &nepoch);
    if( argc>6 )
        sscanf(argv[6], "%d", &nstep);
    if( argc>7 )
        sscanf(argv[7], "%lf", &eps);

    // check number of threads
    if( nthread<1 || nthread>MAXTHREAD )
    {
        printf("nthread must be between 1 and %d\n", MAXTHREAD);
        return 1;
    }

    // check number of epochs
    if( nepoch<1 || nepoch>MAXEPOCH )
    {
        printf("nepoch must be between 1 and %d\n", MAXEPOCH);
        return 1;
    }

    // activate and load model (.mjb binary, otherwise XML)
    mj_activate("mjkey.txt");
    mjModel* m = 0;
    if( strlen(argv[1])>4 && !strcmp(argv[1]+strlen(argv[1])-4, ".mjb") )
        m = mj_loadModel(argv[1], NULL);
    else
        m = mj_loadXML(argv[1], NULL, NULL, 0);
    if( !m )
    {
        printf("Could not load modelfile '%s'\n", argv[1]);
        return 1;
    }

    // print arguments
#if defined(_OPENMP)
    printf("\nnthread : %d (OpenMP)\n", nthread);
#else
    printf("\nnthread : %d (serial)\n", nthread);
#endif
    printf("niter : %d\n", niter);
    printf("nwarmup : %d\n", nwarmup);
    printf("nepoch : %d\n", nepoch);
    printf("nstep : %d\n", nstep);
    printf("eps : %g\n\n", eps);

    // make main mjData and derivative objects sharing one output buffer:
    // inverse fills blocks 0-2 (G0..G2), forward fills blocks 3-5 (F0..F2)
    mjData* dmain = mj_makeData(m);
    int nv = m->nv;
    deriv = alloc_deriv(m);
    mjDeriv* mjderiv_inv = mjDeriv_new_default(m, dmain, deriv, 0, nwarmup, eps, nthread);
    mjDeriv* mjderiv_fwd = mjDeriv_new_default(m, dmain, deriv + 3*nv*nv, 1, nwarmup, eps, nthread);
    mjDeriv* mj_derivs[2] = {mjderiv_inv, mjderiv_fwd};

    // save solver options
    int save_iterations = m->opt.iterations;
    mjtNum save_tolerance = m->opt.tolerance;

    // allocate statistics
    int nefc = 0;
    double cputm[MAXEPOCH][2];
    mjtNum error[MAXEPOCH][8];

    // run epochs, collect statistics
    for( int epoch=0; epoch<nepoch; epoch++ )
    {
        // set solver options for main simulation
        m->opt.iterations = save_iterations;
        m->opt.tolerance = save_tolerance;

        // advance main simulation for nstep
        for( int i=0; i<nstep; i++ )
            mj_step(m, dmain);

        // count number of active constraints
        nefc += dmain->nefc;

        // set solver options for finite differences
        m->opt.iterations = niter;
        m->opt.tolerance = 0;

        // test inverse (isforward==0) then forward (isforward==1)
        for( int isforward=0; isforward<2; isforward++ ) {
            // BUGFIX: restart the timer per phase; previously a single start
            // time made the "forward" figure include the inverse pass
            double starttm = omp_get_wtime();
            mjDeriv* mjderiv = mj_derivs[isforward];
            (*mjderiv->compute_mp)(mjderiv);
            // record duration in ms
            cputm[epoch][isforward] = 1000*(omp_get_wtime() - starttm);
        }

        // check derivatives
        checkderiv(m, deriv, error[epoch]);
    }

    // compute statistics
    double mcputm[2] = {0,0}, merror[8] = {0,0,0,0,0,0,0,0};
    for( int epoch=0; epoch<nepoch; epoch++ )
    {
        mcputm[0] += cputm[epoch][0];
        mcputm[1] += cputm[epoch][1];
        for( int ie=0; ie<8; ie++ )
            merror[ie] += error[epoch][ie];
    }

    // print sizes, timing, accuracy
    printf("sizes : nv %d, nefc %d\n\n", m->nv, nefc/nepoch);
    printf("inverse : %.2f ms\n", mcputm[0]/nepoch);
    printf("forward : %.2f ms\n\n", mcputm[1]/nepoch);
    printf("accuracy: log10(residual L1 relnorm)\n");
    printf("------------------------------------\n");
    for( int ie=0; ie<8; ie++ )
        printf(" %s : %.2g\n", accuracy[ie], merror[ie]/nepoch);
    printf("\n");

    // shut down
    for (int isforward = 0; isforward < 2; isforward++) {
        mjDeriv* mjderiv = mj_derivs[isforward];
        (*mjderiv->free)(mjderiv);
    }
    mju_free(deriv);
    mj_deleteData(dmain);
    mj_deleteModel(m);
    mj_deactivate();
    return 0;
}
|
resource_strings.h | #pragma once
#include <torch/csrc/jit/code_template.h>
namespace torch {
namespace jit {
namespace fuser {
namespace cpu {
/*with type_as not checking type of its input, a fusion group can have non-fp32
tensor as input. Correct code for this case is generated, however, nvrtc does
not know how to handle int*_t integer types, so typedefs help it handle those
cases*/
static auto type_declarations_template = CodeTemplate(R"(
#define POS_INFINITY INFINITY
#define NEG_INFINITY -INFINITY
typedef ${IndexType} IndexType;
template<typename T, size_t N>
struct TensorInfo {
T* data;
IndexType sizes[N];
IndexType strides[N];
};
template<typename T>
struct TensorInfo<T, 0> {
T * data;
};
)");
static auto cpu_compilation_unit_template = CodeTemplate(R"(
#include <math.h>
#include <cstddef>
#include <cstdint>
template <typename scalar_t>
scalar_t rsqrtf(scalar_t x) {
return 1.0/sqrtf(x);
}
${type_declarations}
#define OMP_THRESHOLD 100000
static void ${kernelName}_kernel(IndexType totalElements, ${formals}) {
#pragma omp parallel for if(totalElements > OMP_THRESHOLD)
for (IndexType linearIndex = 0;
linearIndex < totalElements;
linearIndex += 1) {
// Convert `linearIndex` into an offset of tensor:
${tensorOffsets}
// calculate the results
${kernelBody}
}
}
extern "C"
void ${kernelName}(IndexType totalElements, void ** args) {
${kernelName}_kernel(totalElements ${,argument_loads});
}
)");
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
|
GB_unaryop__ainv_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_bool
// op(A') function: GB_tran__ainv_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination: since (A*B)^T = B^T * A^T, a row-major result C
// equals the column-major result of B^T * A^T.  So we swap lhs/rhs, flip
// each operand's storage order and carry its conjugation flag across, then
// forward to the ColMajor specialization with rows/cols exchanged.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Col-major destination: blocked GEMM following Goto's algorithm.
// The matrices are traversed in kc x mc (lhs) and kc x nc (rhs) cache blocks;
// panels are packed into contiguous aligned buffers before being handed to the
// gebp micro-kernel.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs (rows x cols, inner dimension `depth`).
// `blocking` supplies the cache block sizes and (optionally) pre-allocated
// packing buffers; a non-null `info` selects the OpenMP-parallel path and
// carries the per-thread synchronization state.
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// Strided views over the raw operand/result pointers.
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
// blockA is shared between threads (each thread packs its own slice of it);
// blockB is private per thread.
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
// NOTE(review): this is a busy-wait on a plain (non-atomic) field; it relies
// on the decrement below being performed under `#pragma omp atomic` — confirm
// the memory-ordering assumptions hold on the targeted platforms.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
// Threads start at different offsets (tid+shift) to reduce contention on
// the freshest A'_i slices.
for(Index shift=0; shift<threads; ++shift)
{
Index i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
// (each thread processes the full row range against its own packed B').
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
// Use the blocking object's buffers when available, otherwise allocate
// aligned scratch space on the stack/heap for the duration of this call.
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// If the whole rhs fits in one kc x nc block but the lhs does not fit in
// a single mc block, B' only needs to be packed once (on the first i2 pass).
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Functor that binds a GEMM invocation (operands, destination, alpha and the
// blocking policy) so it can be dispatched, possibly over a row range, by the
// parallelization layer (parallelize_gemm).
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
// Stores references only: lhs/rhs/dest/blocking must outlive the functor.
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Prepares the shared blocking state for a run with `num_threads` workers
// and allocates the shared packed-lhs buffer.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Runs the product on the [row, row+rows) x [col, col+cols) part of the
// destination; cols==-1 means "all columns of the rhs".
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
// Forward declaration of the blocking-parameters helper. The trailing boolean
// FiniteAtCompileTime selects between the fully static specialization (all
// dimensions fixed at compile time, buffers on the stack) and the
// runtime-sized one (heap-allocated buffers) defined below.
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the cache-blocking parameters (mc, nc, kc) and the
// pointers to the packed operand buffers used by level-3 BLAS-like products.
// Derived classes (gemm_blocking_space specializations) compute the sizes
// and provide/allocate the actual buffers through the protected members.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA; // packed panel of the lhs
RhsScalar* m_blockB; // packed panel of the rhs
Index m_mc; // block size along the M (rows) direction
Index m_nc; // block size along the N (cols) direction
Index m_kc; // block size along the K (depth) direction
public:
// Everything starts out null/zero; derived classes fill the members in.
level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {}
// Read-only accessors for the blocking sizes.
Index mc() const { return m_mc; }
Index nc() const { return m_nc; }
Index kc() const { return m_kc; }
// Accessors for the packed buffers (may be null until allocated).
LhsScalar* blockA() { return m_blockA; }
RhsScalar* blockB() { return m_blockB; }
};
// Fully static blocking policy: all dimensions are known at compile time, so
// the packing buffers are members (no heap allocation) and the block sizes
// simply cover the whole operands.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
// For a row-major destination the product is transposed, so lhs/rhs and
// rows/cols are swapped here to match the col-major kernel's view.
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
// Static alignment is available: the buffers can be aligned directly.
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// Static alignment is insufficient: over-allocate raw storage and align
// the pointers manually in the constructor below.
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
// Runtime sizes are ignored: block sizes come from the template parameters.
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round the raw storage address up to the next aligned boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((std::size_t(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((std::size_t(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
// Nothing to do: buffers are members, already "allocated".
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Runtime-sized blocking policy: block sizes are computed from the actual
// problem dimensions and the packing buffers are heap-allocated lazily.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
// For a row-major destination the product is transposed (see the product
// specializations above), so lhs/rhs roles are swapped here.
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA; // number of LhsScalar elements in blockA
Index m_sizeB; // number of RhsScalar elements in blockB
public:
// Computes the block sizes; `l3_blocking` selects whether mc/nc are also
// shrunk for L3-cache-aware blocking, or only kc is blocked.
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// Pass copies so that only m_kc is updated; m_mc/m_nc keep the full sizes.
Index m = this->m_mc;
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recomputes the block sizes for a parallel run; must be called before the
// buffers are allocated (only m_kc is blocked here, m_nc stays full).
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy allocation: each buffer is allocated at most once.
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
// NOTE(review): this class owns raw heap buffers and frees them here, but
// declares no copy constructor/assignment — copying an instance would lead
// to a double free. Presumably instances are never copied; confirm.
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// High-level product evaluator for "large" dense*dense products (GemmProduct):
// extracts scalar factors / transpose flags from the operands, sets up a
// blocking object, and dispatches to the blocked kernel, possibly in parallel.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
// blas_traits strips transpose/conjugate/scalar-multiple wrappers from the
// operand expressions and exposes the underlying directly-accessible data.
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
// Coefficient-based (lazy) product, used as a fallback for tiny products.
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// Small-product heuristic: below this combined-size threshold the lazy
// product avoids the packing/blocking overhead of the full GEMM path.
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::evalTo(dst, lhs, rhs);
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::addTo(dst, lhs, rhs);
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::subTo(dst, lhs, rhs);
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * a_lhs * a_rhs — the single entry point the three wrappers
// above funnel into.
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
// Empty operands: nothing to accumulate (dst is left untouched).
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
// Strip expression wrappers and fold their scalar factors into alpha.
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
// Instantiate the kernel with the storage orders / conjugation flags
// deduced from the (cleaned) operand types.
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
// Parallelization is only considered when the destination can be large
// (more than 32 rows, or dynamically sized).
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
openmp_section1.c | ///TAFFO_TEST_ARGS -fopenmp
#include <stdio.h>
#define N (100)
/* TAFFO test: two scalars are scaled concurrently in separate OpenMP
 * sections, then summed. Each variable carries a TAFFO annotation giving
 * its value range for precision tuning. */
int main(int argc, char *argv[])
{
  float first_section __attribute((annotate("target('first_section') scalar(range(0,10) final)"))) =
      1.0f;
  float second_section
      __attribute((annotate("target('second_section') scalar(range(0,2000) final)"))) =
          363;
  /* BUG FIX: the annotation string was "scalar(range(0,2000)) final)",
   * which closes scalar(...) right after range(...) and leaves "final)"
   * dangling outside it; rewritten to match the well-formed sibling
   * annotations above: scalar(range(0,2000) final). */
  float result
      __attribute((annotate("target('result') scalar(range(0,2000) final)"))) =
          0;
#pragma omp parallel num_threads(4)
  {
#pragma omp sections
    {
      /* Each section runs on (at most) one thread; the two variables are
       * disjoint, so no synchronization beyond the sections barrier is needed. */
#pragma omp section
      { first_section *= 2.1f; }
#pragma omp section
      second_section *= 5.4f;
    }
  }
  result = first_section + second_section;
  printf("result: %f\n", result);
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.