source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
test-float-libmvec-sincosf-main.c | /* Test for vector sincosf ABI.
Copyright (C) 2016-2020 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <math.h>
#define N 1000
float x[N], s[N], c[N];
float *s_ptrs[N];
float *c_ptrs[N];

/* Exercise the vector sincosf ABI: fill the input array plus the
   per-element output pointer tables, then call sincosf over the whole
   range inside an OpenMP SIMD region so the compiler may emit calls to
   the vectorized libmvec variant.  Always returns 0.  */
int
test_sincosf_abi (void)
{
  int idx;

  for (idx = 0; idx < N; idx++)
    {
      x[idx] = idx / 3;          /* integer division, as intended */
      s_ptrs[idx] = s + idx;
      c_ptrs[idx] = c + idx;
    }

#pragma omp simd
  for (idx = 0; idx < N; idx++)
    sincosf (x[idx], s_ptrs[idx], c_ptrs[idx]);

  return 0;
}
|
mpy_lowlevel_strided_loops.h | #ifndef __MPY_LOWLEVEL_STRIDED_LOOPS_H
#define __MPY_LOWLEVEL_STRIDED_LOOPS_H
#include <npy_config.h>
/*
* This function pointer is for unary operations that input an
* arbitrarily strided one-dimensional array segment and output
* an arbitrarily strided array segment of the same size.
* It may be a fully general function, or a specialized function
* when the strides or item size have particular known values.
*
* Examples of unary operations are a straight copy, a byte-swap,
* and a casting operation,
*
* The 'transferdata' parameter is slightly special, following a
* generic auxiliary data pattern defined in ndarraytypes.h
* Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
*
*/
typedef void (PyMicArray_StridedUnaryOp)(void *dst, npy_intp dst_stride,
void *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
NpyAuxData *transferdata, int device);
/*
* This is for pointers to functions which behave exactly as
* for PyArray_StridedUnaryOp, but with an additional mask controlling
* which values are transformed.
*
* In particular, the 'i'-th element is operated on if and only if
* mask[i*mask_stride] is true.
*/
typedef void (PyMicArray_MaskedStridedUnaryOp)(void *dst, npy_intp dst_stride,
void *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
npy_intp N, npy_intp src_itemsize,
NpyAuxData *transferdata, int device);
/*
* Gives back a function pointer to a specialized function for copying
* strided memory. Returns NULL if there is a problem with the inputs.
*
* aligned:
* Should be 1 if the src and dst pointers are always aligned,
* 0 otherwise.
* src_stride:
* Should be the src stride if it will always be the same,
* NPY_MAX_INTP otherwise.
* dst_stride:
* Should be the dst stride if it will always be the same,
* NPY_MAX_INTP otherwise.
* itemsize:
* Should be the item size if it will always be the same, 0 otherwise.
*
*/
NPY_NO_EXPORT PyMicArray_StridedUnaryOp *
PyMicArray_GetStridedCopyFn(int aligned,
npy_intp src_stride, npy_intp dst_stride,
npy_intp itemsize);
/*
* Gives back a function pointer to a specialized function for copying
* and swapping strided memory. This assumes each element is a single
* value to be swapped.
*
* For information on the 'aligned', 'src_stride' and 'dst_stride' parameters
* see above.
*
* Parameters are as for PyArray_GetStridedCopyFn.
*/
NPY_NO_EXPORT PyMicArray_StridedUnaryOp *
PyMicArray_GetStridedCopySwapFn(int aligned,
npy_intp src_stride, npy_intp dst_stride,
npy_intp itemsize);
/*
* Gives back a function pointer to a specialized function for copying
* and swapping strided memory. This assumes each element is a pair
* of values, each of which needs to be swapped.
*
* For information on the 'aligned', 'src_stride' and 'dst_stride' parameters
* see above.
*
* Parameters are as for PyArray_GetStridedCopyFn.
*/
NPY_NO_EXPORT PyMicArray_StridedUnaryOp *
PyMicArray_GetStridedCopySwapPairFn(int aligned,
npy_intp src_stride, npy_intp dst_stride,
npy_intp itemsize);
/*
* Gives back a transfer function and transfer data pair which copies
* the data from source to dest, truncating it if the data doesn't
* fit, and padding with zero bytes if there's too much space.
*
* For information on the 'aligned', 'src_stride' and 'dst_stride' parameters
* see above.
*
* Returns NPY_SUCCEED or NPY_FAIL
*/
/*NPY_NO_EXPORT int
PyMicArray_GetStridedZeroPadCopyFn(int aligned, int unicode_swap,
npy_intp src_stride, npy_intp dst_stride,
npy_intp src_itemsize, npy_intp dst_itemsize,
PyMicArray_StridedUnaryOp **outstransfer,
NpyAuxData **outtransferdata);*/
/*
* For casts between built-in numeric types,
* this produces a function pointer for casting from src_type_num
* to dst_type_num. If a conversion is unsupported, returns NULL
* without setting a Python exception.
*/
NPY_NO_EXPORT PyMicArray_StridedUnaryOp *
PyMicArray_GetStridedNumericCastFn(int aligned,
npy_intp src_stride, npy_intp dst_stride,
int src_type_num, int dst_type_num);
/*
* These two functions copy or convert the data of an n-dimensional array
* to/from a 1-dimensional strided buffer. These functions will only call
* 'stransfer' with the provided dst_stride/src_stride and
* dst_strides[0]/src_strides[0], so the caller can use those values to
* specialize the function.
* Note that even if ndim == 0, everything needs to be set as if ndim == 1.
*
* The return value is the number of elements it couldn't copy. A return value
* of 0 means all elements were copied, a larger value means the end of
* the n-dimensional array was reached before 'count' elements were copied.
*
* ndim:
* The number of dimensions of the n-dimensional array.
* dst/src/mask:
* The destination, source or mask starting pointer.
* dst_stride/src_stride/mask_stride:
* The stride of the 1-dimensional strided buffer
* dst_strides/src_strides:
* The strides of the n-dimensional array.
* dst_strides_inc/src_strides_inc:
* How much to add to the ..._strides pointer to get to the next stride.
* coords:
* The starting coordinates in the n-dimensional array.
* coords_inc:
* How much to add to the coords pointer to get to the next coordinate.
* shape:
* The shape of the n-dimensional array.
* shape_inc:
* How much to add to the shape pointer to get to the next shape entry.
* count:
* How many elements to transfer
* src_itemsize:
* How big each element is. If transferring between elements of different
* sizes, for example a casting operation, the 'stransfer' function
* should be specialized for that, in which case 'stransfer' will use
* this parameter as the source item size.
* stransfer:
* The strided transfer function.
* transferdata:
* An auxiliary data pointer passed to the strided transfer function.
* This follows the conventions of NpyAuxData objects.
*/
NPY_NO_EXPORT npy_intp
PyMicArray_TransferNDimToStrided(npy_intp ndim,
char *dst, npy_intp dst_stride,
char *src, npy_intp *src_strides, npy_intp src_strides_inc,
npy_intp *coords, npy_intp coords_inc,
npy_intp *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyMicArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata, int transferdevice);
NPY_NO_EXPORT npy_intp
PyMicArray_TransferStridedToNDim(npy_intp ndim,
char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_intp *coords, npy_intp coords_inc,
npy_intp *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyMicArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata, int transferdevice);
NPY_NO_EXPORT npy_intp
PyMicArray_TransferMaskedStridedToNDim(npy_intp ndim,
char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
npy_intp *coords, npy_intp coords_inc,
npy_intp *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyMicArray_MaskedStridedUnaryOp *stransfer,
NpyAuxData *transferdata, int transferdevice);
#pragma omp declare target
/*
* Return number of elements that must be peeled from
* the start of 'addr' with 'nvals' elements of size 'esize'
* in order to reach 'alignment'.
* alignment must be a power of two.
* see npy_blocked_end for an example
*/
/*
 * Number of leading elements that must be handled by scalar code so the
 * remaining data starts on an 'alignment'-byte boundary.  'alignment'
 * must be a power of two; the result never exceeds 'nvals'.
 */
static NPY_INLINE npy_uintp
mpy_aligned_block_offset(const void * addr, const npy_uintp esize,
                         const npy_uintp alignment, const npy_uintp nvals)
{
    const npy_uintp misalign = (npy_uintp)addr & (alignment - 1);
    npy_uintp head = 0;
    if (misalign != 0) {
        head = (alignment - misalign) / esize;
    }
    if (head > nvals) {
        head = nvals;
    }
    return head;
}
/*
* Return upper loop bound for an array of 'nvals' elements
* of size 'esize' peeled by 'offset' elements and blocking to
* a vector size of 'vsz' in bytes
*
* example usage:
* npy_intp i;
* double v[101];
* npy_intp esize = sizeof(v[0]);
* npy_intp peel = npy_aligned_block_offset(v, esize, 16, n);
* // peel to alignment 16
* for (i = 0; i < peel; i++)
* <scalar-op>
* // simd vectorized operation
* for (; i < npy_blocked_end(peel, esize, 16, n); i += 16 / esize)
* <blocked-op>
* // handle scalar rest
* for(; i < n; i++)
* <scalar-op>
*/
/*
 * Upper loop bound for the vectorized part of the iteration: the count
 * remaining after 'offset' peeled elements, rounded down to a whole
 * number of vectors of 'vsz' bytes ('vsz' / 'esize' elements each).
 */
static NPY_INLINE npy_uintp
mpy_blocked_end(const npy_uintp offset, const npy_uintp esize,
                const npy_uintp vsz, const npy_uintp nvals)
{
    const npy_uintp n_rest = nvals - offset;
    const npy_uintp vec_elems = vsz / esize;
    return n_rest - (n_rest % vec_elems);
}
/* byte swapping functions */
/* Byteswap a 16-bit integer: exchange its two bytes. */
static NPY_INLINE npy_uint16
mpy_bswap2(npy_uint16 x)
{
    const npy_uint16 low_to_high = (npy_uint16)((x & 0xffu) << 8);
    const npy_uint16 high_to_low = (npy_uint16)(x >> 8);
    return (npy_uint16)(low_to_high | high_to_low);
}
/*
* treat as int16 and byteswap unaligned memory,
* some cpus don't support unaligned access
*/
/*
 * Byteswap a 16-bit value residing at a possibly unaligned address,
 * working byte-by-byte because some CPUs fault on unaligned loads.
 */
static NPY_INLINE void
mpy_bswap2_unaligned(char * x)
{
    const char tmp = x[1];
    x[1] = x[0];
    x[0] = tmp;
}
/* Byteswap a 32-bit integer using the compiler intrinsic. */
static NPY_INLINE npy_uint32
mpy_bswap4(npy_uint32 x)
{
return __builtin_bswap32(x);
}
/*
 * Byteswap a 32-bit value at a possibly unaligned address, one byte at
 * a time (some CPUs do not support unaligned word access).
 */
static NPY_INLINE void
mpy_bswap4_unaligned(char * x)
{
    char tmp;
    tmp = x[0]; x[0] = x[3]; x[3] = tmp;
    tmp = x[1]; x[1] = x[2]; x[2] = tmp;
}
/* Byteswap a 64-bit integer using the compiler intrinsic. */
static NPY_INLINE npy_uint64
mpy_bswap8(npy_uint64 x)
{
return __builtin_bswap64(x);
}
/*
 * Byteswap a 64-bit value at a possibly unaligned address, swapping the
 * mirrored byte pairs (0,7), (1,6), (2,5), (3,4) one byte at a time.
 */
static NPY_INLINE void
mpy_bswap8_unaligned(char * x)
{
    int i;
    for (i = 0; i < 4; i++) {
        const char tmp = x[i];
        x[i] = x[7 - i];
        x[7 - i] = tmp;
    }
}
#pragma omp end declare target
/*
 * Raw n-dimensional iteration helpers.  NPY_RAW_ITER_START opens a
 * do/while loop over all coordinates with dimension 0 left to the loop
 * body, and the *_NEXT macros close it while advancing one, two, three
 * or four data pointers in lock-step.  Typical use:
 *
 *     NPY_RAW_ITER_START(idim, ndim, coord, shape) {
 *         ... process the innermost (0th) dimension ...
 *     } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape, data, strides);
 *
 * When a dimension wraps around, the data pointer is rewound by
 * (shape-1)*stride for that dimension and the next-outer dimension is
 * bumped; iteration ends when the outermost dimension wraps.
 */
/* Start raw iteration */
#define NPY_RAW_ITER_START(idim, ndim, coord, shape) \
memset((coord), 0, (ndim) * sizeof(coord[0])); \
do {
/* Increment to the next n-dimensional coordinate for one raw array */
#define NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape, data, strides) \
for ((idim) = 1; (idim) < (ndim); ++(idim)) { \
if (++(coord)[idim] == (shape)[idim]) { \
(coord)[idim] = 0; \
(data) -= ((shape)[idim] - 1) * (strides)[idim]; \
} \
else { \
(data) += (strides)[idim]; \
break; \
} \
} \
} while ((idim) < (ndim))
/* Increment to the next n-dimensional coordinate for two raw arrays */
#define NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape, \
dataA, stridesA, dataB, stridesB) \
for ((idim) = 1; (idim) < (ndim); ++(idim)) { \
if (++(coord)[idim] == (shape)[idim]) { \
(coord)[idim] = 0; \
(dataA) -= ((shape)[idim] - 1) * (stridesA)[idim]; \
(dataB) -= ((shape)[idim] - 1) * (stridesB)[idim]; \
} \
else { \
(dataA) += (stridesA)[idim]; \
(dataB) += (stridesB)[idim]; \
break; \
} \
} \
} while ((idim) < (ndim))
/* Increment to the next n-dimensional coordinate for three raw arrays */
#define NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape, \
dataA, stridesA, \
dataB, stridesB, \
dataC, stridesC) \
for ((idim) = 1; (idim) < (ndim); ++(idim)) { \
if (++(coord)[idim] == (shape)[idim]) { \
(coord)[idim] = 0; \
(dataA) -= ((shape)[idim] - 1) * (stridesA)[idim]; \
(dataB) -= ((shape)[idim] - 1) * (stridesB)[idim]; \
(dataC) -= ((shape)[idim] - 1) * (stridesC)[idim]; \
} \
else { \
(dataA) += (stridesA)[idim]; \
(dataB) += (stridesB)[idim]; \
(dataC) += (stridesC)[idim]; \
break; \
} \
} \
} while ((idim) < (ndim))
/* Increment to the next n-dimensional coordinate for four raw arrays */
#define NPY_RAW_ITER_FOUR_NEXT(idim, ndim, coord, shape, \
dataA, stridesA, \
dataB, stridesB, \
dataC, stridesC, \
dataD, stridesD) \
for ((idim) = 1; (idim) < (ndim); ++(idim)) { \
if (++(coord)[idim] == (shape)[idim]) { \
(coord)[idim] = 0; \
(dataA) -= ((shape)[idim] - 1) * (stridesA)[idim]; \
(dataB) -= ((shape)[idim] - 1) * (stridesB)[idim]; \
(dataC) -= ((shape)[idim] - 1) * (stridesC)[idim]; \
(dataD) -= ((shape)[idim] - 1) * (stridesD)[idim]; \
} \
else { \
(dataA) += (stridesA)[idim]; \
(dataB) += (stridesB)[idim]; \
(dataC) += (stridesC)[idim]; \
(dataD) += (stridesD)[idim]; \
break; \
} \
} \
} while ((idim) < (ndim))
#endif |
openmp-ex20.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
/*
 * OpenMP round-round-round-your-boat demo: each thread prints the first
 * verse, all threads rendezvous at a barrier, then every thread except
 * thread 0 prints the second verse, staggered by per-thread sleeps and
 * indentation.
 */
int main(void)
{
#pragma omp parallel
    {
        int thread_num = omp_get_thread_num(), i;
        char wait[BUFSIZ] = {'\0'};
        for (i = 0; i < 4 * thread_num; i++) wait[i] = ' ';
        sleep(thread_num);
        printf("%srow row row your boat...\n", wait);
        /* An OpenMP barrier must be encountered by every thread of the
           team or by none of them.  The original placed the barrier
           inside `if (thread_num > 0)`, which is non-conforming: thread 0
           never reaches it and the remaining threads can deadlock waiting
           for it.  Keep the barrier unconditional and make only the
           second-verse printing conditional. */
#pragma omp barrier
        if (thread_num > 0) {
            sleep(thread_num);
            printf("%s...gently down the stream...\n", wait);
        }
    }
    printf("Better.\n");
    return 0;
}
|
trsm_x_bsr_u_hi_row.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
/*
 * Triangular solve (TRSM) for an upper-triangular BSR matrix A with
 * row-major blocks and multiple right-hand sides: for each of 'columns'
 * right-hand-side columns, solves for y in A*y = alpha*x using block
 * backward substitution (block rows from last to first; within the
 * diagonal block, rows from bottom to top).  x and y are dense m-by-
 * 'columns' matrices with leading dimensions ldx and ldy.
 * No division by the diagonal entry is performed, consistent with the
 * unit-diagonal ("_u_") variant this file implements.
 * NOTE(review): assumes alpha_madde(acc,a,b) performs acc += a*b,
 * alpha_mul(d,a,b) d = a*b, and alpha_sub(d,a,b) d = a - b -- verify
 * against the project's kernel macro definitions.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
const ALPHA_INT num_thread = alpha_get_thread_num();
const ALPHA_INT bs = A->block_size;
/* scalar (unblocked) dimensions */
const ALPHA_INT m = A->rows * bs;
const ALPHA_INT n = A->cols * bs;
const ALPHA_INT bs2 = bs * bs;
const ALPHA_INT b_rows = m / bs;
/* b_cols is computed for symmetry with b_rows but is not used below */
const ALPHA_INT b_cols = n / bs;
const alphasparse_layout_t block_layout = A->block_layout;
/* this kernel is written for row-major blocks only */
if(block_layout != ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
printf("layout not consistent!!!\n");
exit(-1);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
/* right-hand-side columns are independent -> parallelize over them */
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
/* per-thread accumulator for one block row's partial sums */
ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
/* backward substitution over block rows */
for (ALPHA_INT br = b_rows - 1; br >= 0; br--)
{
for(ALPHA_INT i = 0 ; i < bs ; i++){
alpha_setzero(temp[i]);
}
ALPHA_INT diagBlock = -1;
/* accumulate contributions of all blocks strictly right of the diagonal */
for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
if(bc > br)
//row-major
for(ALPHA_INT row = 0; row < bs; row++)
{
//all entries belong to the upper triangle
ALPHA_INT a0_offset = ai * bs2 + row * bs;
for(ALPHA_INT col = 0 ; col < bs ; col++)
{
ALPHA_INT y_offset = (bc * bs + col) * ldy + out_y_col;
ALPHA_INT ele_offset = a0_offset + col;
alpha_madde(temp[row], A->values[ ele_offset ] ,y[y_offset]);
}
}
//the diagonal block must be present (non-zero) for a valid solve
if( bc==br ){
diagBlock = ai;
}
}
if(diagBlock == -1)
{
printf("lhs matrix invalid for trsm!!!\n");
exit(-1);
}
//row-major
//process the diagonal block bottom-up, using already-solved rows below
for(ALPHA_INT row = bs - 1; row >=0 ; row--)
{
//strict upper triangle of the diagonal block
for(ALPHA_INT col = row + 1 ; col < bs ; col++){
ALPHA_INT y_offset = (br * bs + col) * ldy + out_y_col;
alpha_madde(temp[row] ,A->values[ diagBlock * bs2 + row * bs + col] ,y[y_offset]);
}
/* y = alpha*x - accumulated upper-triangle contributions */
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t,alpha,x[(br * bs + row) * ldx + out_y_col] );
alpha_sub(y[(br * bs + row) * ldy + out_y_col],t,temp[row]);
}
}
alpha_free(temp);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__lxor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int8)
// A*D function (colscale): GB (_AxD__lxor_int8)
// D*A function (rowscale): GB (_DxB__lxor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int8)
// C=scalar+B GB (_bind1st__lxor_int8)
// C=scalar+B' GB (_bind1st_tran__lxor_int8)
// C=A+scalar GB (_bind2nd__lxor_int8)
// C=A'+scalar GB (_bind2nd_tran__lxor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
// Configuration macros consumed by the template files included below:
// they bind the generic GB_add/GB_emult/scale templates to the
// LXOR operator on int8_t operands.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// access the p-th entry of the C output array
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT8 || GxB_NO_LXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for this operator: the dense ewise3-accum kernel only exists
// for the arithmetic ops listed below, and LXOR is not one of them
// (hence the "(none)" placeholder name).
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The loop body comes from the included template, specialized via the
// GB_* macros defined above for the LXOR/int8 operator.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B is sliced into B_ntasks tasks (B_ek_slicing) executed on B_nthreads.
GrB_Info GB (_Cdense_accumB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above already returned (auto-generated)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx is exposed for the template; the A slicing drives the parallelism.
GrB_Info GB (_AxD__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above, scaling rows of B by the diagonal of D.
GrB_Info GB (_DxB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Workspace slicings are declared here and released by GB_FREE_WORK;
// the add itself lives in the included template.
GrB_Info GB (_AaddB__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// General eWiseMult (method 08) driven by the precomputed task list.
GrB_Info GB (_AemultB_08__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for LXOR (commutative), so only the non-flipped
// branch below is compiled in.
GrB_Info GB (_AemultB_02__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Parallelism is driven by slicing the mask M rather than A or B.
GrB_Info GB (_AemultB_04__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = lxor (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__lxor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t xscalar = (*((const int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // operate only on entries present in B's bitmap/full pattern
        if (GBB (Bb, p))
        {
            int8_t bval = GBX (Bx, p, false) ;
            // logical xor: true iff exactly one operand is nonzero
            Cx [p] = ((xscalar != 0) != (bval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = lxor (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__lxor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t yscalar = (*((const int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // operate only on entries present in A's bitmap/full pattern
        if (GBB (Ab, p))
        {
            int8_t aval = GBX (Ax, p, false) ;
            // logical xor: true iff exactly one operand is nonzero
            Cx [p] = ((aval != 0) != (yscalar != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function (generated)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// The transpose loop itself comes from GB_unop_transpose.c, which
// applies GB_CAST_OP to each entry as it is moved.
GrB_Info GB (_bind2nd_tran__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
update_ops_matrix_dense_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
//void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
//void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask);
/*
 * Apply a dense 2^k x 2^k matrix to the listed k target qubits of a
 * state vector of dimension 'dim'.  Dispatches to specialized 1- and
 * 2-qubit kernels, otherwise to the generic serial or (when OpenMP is
 * enabled and the state is large enough) parallel implementation.
 */
void multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
if (target_qubit_index_count == 1) {
single_qubit_dense_matrix_gate(target_qubit_index_list[0], matrix, state, dim);
}
else if (target_qubit_index_count == 2) {
double_qubit_dense_matrix_gate_c(target_qubit_index_list[0], target_qubit_index_list[1], matrix, state, dim);
}
else {
//multi_qubit_dense_matrix_gate_old_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_old_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//return;
#ifdef _OPENMP
/* below 2^10 amplitudes the threading overhead outweighs the gain */
UINT threshold = 10;
if (dim < (((ITYPE)1) << threshold)) {
multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
}
else {
multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
}
#else
multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
#endif
}
}
/*
 * From the (unsorted) list of target qubit indices, fill dst_array with
 * the indices sorted ascending and dst_mask[i] with the mask 2^q - 1 for
 * each sorted index q (all bit positions below q), used to expand a
 * compressed loop index back into a full basis index.
 *
 * Fix: compute the mask in ITYPE-width arithmetic.  The original
 * `1UL << dst_array[i]` shifts an `unsigned long`, which is only 32 bits
 * on LLP64 platforms (MSVC is explicitly supported by this file), so a
 * qubit index >= 32 would invoke undefined behavior / truncate the mask.
 * The rest of the file already uses the `((ITYPE)1) << n` idiom.
 */
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask) {
    memcpy(dst_array, array, sizeof(UINT) * count);
    sort_ui(dst_array, count);
    for (UINT i = 0; i < count; ++i) {
        dst_mask[i] = (((ITYPE)1) << dst_array[i]) - 1;
    }
}
/*
 * Serial generic kernel: for each of the dim >> k compressed indices,
 * expand to the base basis index with all k target-qubit bits cleared,
 * gather the 2^k amplitudes addressed via matrix_mask_list, multiply by
 * the dense 2^k x 2^k matrix into a scratch buffer, and scatter back.
 */
void multi_qubit_dense_matrix_gate_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
UINT sort_array[64];
ITYPE mask_array[64];
create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array);
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index: insert a 0 bit at each (sorted) target position
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) {
basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1);
}
// compute matrix-vector multiply into the scratch buffer
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result back into the state vector
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
/* cast drops const: the list was heap-allocated by create_matrix_mask_list */
free((ITYPE*)matrix_mask_list);
}
#ifdef _OPENMP
/* OpenMP variant of multi_qubit_dense_matrix_gate_single: statically
 * partitions the loop_dim outer iterations across all available threads.
 * Each thread gets its own scratch row inside one shared allocation, so
 * no allocation happens inside the parallel region. */
void multi_qubit_dense_matrix_gate_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
	UINT sort_array[64];
	ITYPE mask_array[64];
	create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array);
	// matrix dim, mask, buffer
	const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
	const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
	// loop variables
	const ITYPE loop_dim = dim >> target_qubit_index_count;
	const UINT thread_count = omp_get_max_threads();
	/* one scratch vector of matrix_dim amplitudes per thread */
	CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
	/* manual static schedule: the first `residual` threads take one extra
	 * iteration so the work divides evenly */
	const ITYPE block_size = loop_dim / thread_count;
	const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
	{
		UINT thread_id = omp_get_thread_num();
		ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
		ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
		CTYPE* buffer = buffer_list + thread_id * matrix_dim;
		ITYPE state_index;
		for (state_index = start_index; state_index < end_index; ++state_index) {
			// create base index: expand the compressed counter so that the
			// target-qubit bit positions are zero (masks sorted ascending)
			ITYPE basis_0 = state_index;
			for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) {
				basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1);
			}
			// compute matrix-vector multiply
			for (ITYPE y = 0; y < matrix_dim; ++y) {
				buffer[y] = 0;
				for (ITYPE x = 0; x < matrix_dim; ++x) {
					buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
				}
			}
			// set result; distinct state_index values expand to disjoint
			// amplitude sets, so threads never write the same element
			for (ITYPE y = 0; y < matrix_dim; ++y) {
				state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
			}
		}
	}
	free(buffer_list);
	free((ITYPE*)matrix_mask_list);
}
#endif
/*
void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#ifdef _OPENMP
void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#endif
*/ |
main.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
/* These need to be before any possible inclusions of stdint.h or inttypes.h.
* */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "../generator/make_graph.h"
#include "../generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>
/* qsort comparator: orders doubles ascending.  Mirrors the original exactly,
 * including its NaN behavior (any unordered pair compares as 1). */
static int compare_doubles(const void* a, const void* b) {
  const double lhs = *(const double*)a;
  const double rhs = *(const double*)b;
  if (lhs < rhs) return -1;
  return (lhs == rhs) ? 0 : 1;
}
/* Index constants for the r[] array filled by get_statistics(); s_LAST is the required array length. */
enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};
/* Computes summary statistics of the n values in x[], storing them into r[]
 * at the s_* enum indices: minimum, quartiles, median, maximum, mean, and
 * sample standard deviation.  Order statistics are taken from a sorted copy
 * of x (x itself is not modified).
 *
 * Fixes: the original divided by (n - 1) unconditionally, producing a
 * divide-by-zero for n == 1, and read xx[0] out of bounds for n == 0.
 * Now n <= 0 leaves r[] untouched and n == 1 reports a std. dev. of 0;
 * results for n >= 2 are unchanged. */
static void get_statistics(const double x[], int n, double r[s_LAST]) {
  double temp;
  int i;
  if (n <= 0) return; /* nothing to summarize; avoids 0-division and xx[0] OOB */
  /* Compute mean. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += x[i];
  temp /= n;
  r[s_mean] = temp;
  /* Compute sample std. dev.; guard the n == 1 case. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
  r[s_std] = (n > 1) ? sqrt(temp / (n - 1)) : 0.0;
  /* Sort a copy of x (xmalloc aborts on allocation failure). */
  double* xx = (double*)xmalloc(n * sizeof(double));
  memcpy(xx, x, n * sizeof(double));
  qsort(xx, n, sizeof(double), compare_doubles);
  /* Get order statistics; quartiles/median average the two straddling
   * elements (they coincide when the index is exact). */
  r[s_minimum] = xx[0];
  r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
  r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
  r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
  r[s_maximum] = xx[n - 1];
  /* Clean up. */
  free(xx);
}
/* Graph500 reference driver: generates a Kronecker graph (in memory or in a
 * scratch file named by $TMPFILE), selects up to 64 BFS roots that have at
 * least one incident edge, runs and validates one BFS per root, and prints
 * the official timing / edge-count / TEPS statistics from rank 0. */
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  setup_globals();

  /* Parse arguments. */
  int SCALE = 16;
  int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
  if (argc >= 2) SCALE = atoi(argv[1]);
  if (argc >= 3) edgefactor = atoi(argv[2]);
  if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
    if (rank == 0) {
      fprintf(stderr, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
    }
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  uint64_t seed1 = 2, seed2 = 3; /* fixed constants: every rank derives the same stream */
  const char* filename = getenv("TMPFILE");
  /* If filename is NULL, store data in memory */
  tuple_graph tg;
  tg.nglobaledges = (int64_t)(edgefactor) << SCALE; /* edgefactor * 2^SCALE edges */
  int64_t nglobalverts = (int64_t)(1) << SCALE;     /* 2^SCALE vertices */
  tg.data_in_file = (filename != NULL);
  if (tg.data_in_file) {
    /* Exclusive-create scratch file, auto-deleted on close. */
    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
    MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
    MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
    MPI_File_set_atomicity(tg.edgefile, 0);
  }

  /* Make the raw graph edges. */
  /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
   * validator). */
  int num_bfs_roots = 64;
  int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
  int64_t max_used_vertex = 0;

  double make_graph_start = MPI_Wtime();
  {
    /* Spread the two 64-bit numbers into five nonzero values in the correct
     * range. */
    uint_fast32_t seed[5];
    make_mrg_seed(seed1, seed2, seed);

    /* As the graph is being generated, also keep a bitmap of vertices with
     * incident edges.  We keep a grid of processes, each row of which has a
     * separate copy of the bitmap (distributed among the processes in the
     * row), and then do an allreduce at the end.  This scheme is used to avoid
     * non-local communication and reading the file separately just to find BFS
     * roots. */
    MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
    int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
    if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
      /* grow per-rank slice so that all ranks together can cover every vertex */
      bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
    }
    /* ranks_per_row = how many column ranks are needed so their bitmap
     * slices tile the full vertex range */
    int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
    int nrows = size / ranks_per_row;
    int my_row = -1, my_col = -1;
    unsigned char* restrict has_edge = NULL;
    MPI_Comm cart_comm;
    {
      int dims[2] = {size / ranks_per_row, ranks_per_row};
      int periods[2] = {0, 0};
      /* reorder=1: ranks not in the nrows*ranks_per_row grid get MPI_COMM_NULL */
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    }
    int in_generating_rectangle = 0;
    if (cart_comm != MPI_COMM_NULL) {
      in_generating_rectangle = 1;
      {
        int dims[2], periods[2], coords[2];
        MPI_Cart_get(cart_comm, 2, dims, periods, coords);
        my_row = coords[0];
        my_col = coords[1];
      }
      MPI_Comm this_col;
      MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
      MPI_Comm_free(&cart_comm);
      has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
      memset(has_edge, 0, bitmap_size_in_bytes);
      /* Every rank in a given row creates the same vertices (for updating the
       * bitmap); only one writes them to the file (or final memory buffer). */
      packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
      MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
      /* fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */
      if (tg.data_in_file) {
        tg.edgememory_size = 0;
        tg.edgememory = NULL;
      } else {
        /* In-memory mode: chunks are dealt round-robin over the grid, so
         * compute exactly how many edges land on this rank before
         * allocating. */
        int my_pos = my_row + my_col * nrows;
        int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
                       (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) :
                       -1;
        int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
        /* full rounds + one extra full chunk for early ranks + partial tail chunk */
        int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
                         FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
                         (my_pos == last_pos ? edges_left : 0);
        /* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
        tg.edgememory_size = nedges;
        tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
      }
      MPI_Offset block_idx;
      for (block_idx = 0; block_idx < block_limit; ++block_idx) {
        /* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
        MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
        MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
        /* Generate directly into the final memory buffer when this rank owns
         * the chunk; otherwise into the throwaway bitmap-update buffer. */
        packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
                                  tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) :
                                  buf;
        /* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
        if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
          assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
        }
        generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
        if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */
          MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
        }
        /* Record both endpoints of each non-self-loop edge in this rank's
         * slice of the incident-edge bitmap. */
        ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < edge_count; ++i) {
          int64_t src = get_v0_from_edge(&actual_buf[i]);
          int64_t tgt = get_v1_from_edge(&actual_buf[i]);
          if (src == tgt) continue; /* self-loops never make a vertex a valid root */
          if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) { /* vertex falls in this column's slice */
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
          }
          if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
          }
        }
      }
      free(buf);
#if 0
      /* The allreduce for each root acts like we did this: */
      MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
      MPI_Comm_free(&this_col);
    } else {
      /* Rank outside the generating grid: owns no edges. */
      tg.edgememory = NULL;
      tg.edgememory_size = 0;
    }
    MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);

    /* Find roots and max used vertex */
    {
      uint64_t counter = 0;
      int bfs_root_idx;
      for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
        int64_t root;
        while (1) {
          double d[2];
          /* every rank draws the same deterministic candidates */
          make_random_numbers(2, seed1, seed2, counter, d);
          root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
          counter += 2;
          /* NOTE(review): if the search exhausts 2*nglobalverts draws the
           * current (possibly degree-zero) candidate is kept as-is. */
          if (counter > 2 * nglobalverts) break;
          int is_duplicate = 0;
          int i;
          for (i = 0; i < bfs_root_idx; ++i) {
            if (root == bfs_roots[i]) {
              is_duplicate = 1;
              break;
            }
          }
          if (is_duplicate) continue; /* Everyone takes the same path here */
          int root_ok = 0;
          /* only the bitmap owner can check the candidate's degree ... */
          if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
            root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
          }
          /* ... and the allreduce broadcasts the verdict to every rank */
          MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
          if (root_ok) break;
        }
        bfs_roots[bfs_root_idx] = root;
      }
      num_bfs_roots = bfs_root_idx; /* loop ran to completion, so this is a no-op */

      /* Find maximum non-zero-degree vertex. */
      {
        int64_t i;
        max_used_vertex = 0;
        if (in_generating_rectangle) {
          /* scan this rank's bitmap slice from the top down */
          for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
            if (i > nglobalverts) continue;
            if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
              max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
              break;
            }
          }
        }
        MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
      }
    }
    if (in_generating_rectangle) {
      MPI_Free_mem(has_edge);
    }
    if (tg.data_in_file) {
      MPI_File_sync(tg.edgefile);
    }
  }
  double make_graph_stop = MPI_Wtime();
  double make_graph_time = make_graph_stop - make_graph_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "graph_generation:               %f s\n", make_graph_time);
  }

  /* Make user's graph data structure. */
  double data_struct_start = MPI_Wtime();
  make_graph_data_structure(&tg);
  double data_struct_stop = MPI_Wtime();
  double data_struct_time = data_struct_stop - data_struct_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "construction_time:              %f s\n", data_struct_time);
  }

  /* Number of edges visited in each BFS; a double so get_statistics can be
   * used directly. */
  double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));

  /* Run BFS. */
  int validation_passed = 1;
  double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  uint64_t nlocalverts = get_nlocalverts_for_pred();
  int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));

  int bfs_root_idx;
  for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
    int64_t root = bfs_roots[bfs_root_idx];

    if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);

    /* Clear the pred array. */
    memset(pred, 0, nlocalverts * sizeof(int64_t));

    /* Do the actual BFS. */
    double bfs_start = MPI_Wtime();
    run_bfs(root, &pred[0]);
    double bfs_stop = MPI_Wtime();
    bfs_times[bfs_root_idx] = bfs_stop - bfs_start;
    if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]);

    /* Validate result. */
    if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);
    double validate_start = MPI_Wtime();
    int64_t edge_visit_count;
    int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
    double validate_stop = MPI_Wtime();
    validate_times[bfs_root_idx] = validate_stop - validate_start;
    if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
    edge_counts[bfs_root_idx] = (double)edge_visit_count;
    if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);

    if (!validation_passed_one) {
      validation_passed = 0;
      if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
      break;
    }
  }

  MPI_Free_mem(pred);
  free(bfs_roots);
  free_graph_data_structure();

  if (tg.data_in_file) {
    MPI_File_close(&tg.edgefile);
  } else {
    free(tg.edgememory); tg.edgememory = NULL;
  }

  /* Print results. */
  if (rank == 0) {
    if (!validation_passed) {
      fprintf(stdout, "No results printed for invalid run.\n");
    } else {
      int i;
      fprintf(stdout, "SCALE:                          %d\n", SCALE);
      fprintf(stdout, "edgefactor:                     %d\n", edgefactor);
      fprintf(stdout, "NBFS:                           %d\n", num_bfs_roots);
      fprintf(stdout, "graph_generation:               %g\n", make_graph_time);
      fprintf(stdout, "num_mpi_processes:              %d\n", size);
      fprintf(stdout, "construction_time:              %g\n", data_struct_time);
      double stats[s_LAST];
      get_statistics(bfs_times, num_bfs_roots, stats);
      fprintf(stdout, "min_time:                       %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_time:             %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_time:                    %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_time:             %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_time:                       %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_time:                      %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_time:                    %g\n", stats[s_std]);
      get_statistics(edge_counts, num_bfs_roots, stats);
      fprintf(stdout, "min_nedge:                      %.11g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_nedge:            %.11g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_nedge:                   %.11g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_nedge:            %.11g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_nedge:                      %.11g\n", stats[s_maximum]);
      fprintf(stdout, "mean_nedge:                     %.11g\n", stats[s_mean]);
      fprintf(stdout, "stddev_nedge:                   %.11g\n", stats[s_std]);
      double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
      for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
      get_statistics(secs_per_edge, num_bfs_roots, stats);
      /* TEPS is the reciprocal of seconds/edge, so the order statistics
       * swap ends: min_TEPS comes from the max of secs_per_edge, etc. */
      fprintf(stdout, "min_TEPS:                       %g\n", 1. / stats[s_maximum]);
      fprintf(stdout, "firstquartile_TEPS:             %g\n", 1. / stats[s_thirdquartile]);
      fprintf(stdout, "median_TEPS:                    %g\n", 1. / stats[s_median]);
      fprintf(stdout, "thirdquartile_TEPS:             %g\n", 1. / stats[s_firstquartile]);
      fprintf(stdout, "max_TEPS:                       %g\n", 1. / stats[s_minimum]);
      fprintf(stdout, "harmonic_mean_TEPS:             %g\n", 1. / stats[s_mean]);
      /* Formula from:
       * Title: The Standard Errors of the Geometric and Harmonic Means and
       *        Their Application to Index Numbers
       * Author(s): Nilan Norris
       * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
       * Publisher(s): Institute of Mathematical Statistics
       * Stable URL: http://www.jstor.org/stable/2235723
       * (same source as in specification). */
      fprintf(stdout, "harmonic_stddev_TEPS:           %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
      free(secs_per_edge); secs_per_edge = NULL;
      free(edge_counts); edge_counts = NULL;
      get_statistics(validate_times, num_bfs_roots, stats);
      fprintf(stdout, "min_validate:                   %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_validate:         %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_validate:                %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_validate:         %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_validate:                   %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_validate:                  %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_validate:                %g\n", stats[s_std]);
#if 0
      for (i = 0; i < num_bfs_roots; ++i) {
        fprintf(stdout, "Run %3d:                        %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
      }
#endif
    }
  }

  free(bfs_times);
  free(validate_times);

  cleanup_globals();
  MPI_Finalize();
  return 0;
}
|
ClangASTHelper.h | //
// Copyright (c) 2012, University of Erlangen-Nuremberg
// Copyright (c) 2012, Siemens AG
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===--- ClangASTHelper.h - Implements helper class for easy clang AST handling. -----===//
//
// This file implements a helper class which contains a few methods for easy clang AST handling.
//
//===---------------------------------------------------------------------------------===//
#ifndef _BACKEND_CLANG_AST_HELPER_H_
#define _BACKEND_CLANG_AST_HELPER_H_
#include "hipacc/AST/ASTNode.h"
#include <clang/AST/ExprCXX.h>
#include <limits>
#include <string>
namespace clang
{
namespace hipacc
{
namespace Backend
{
/** \brief Helper class which contains a few methods for easy clang AST handling. */
class ClangASTHelper final
{
public:
template <typename ElementType> using VectorType = ::llvm::SmallVector<ElementType, 16U>; //!< Type alias for LLVM SmallVector type
typedef VectorType<::clang::Expr*> ExpressionVectorType; //!< Type definition for a vector of expressions.
typedef VectorType<::clang::FunctionDecl*> FunctionDeclarationVectorType; //!< Type definition for a vector of function declarations.
typedef VectorType<::clang::QualType> QualTypeVectorType; //!< Type definition for a vector of qualified types.
typedef VectorType<::clang::Stmt*> StatementVectorType; //!< Type definition for a vector of statements.
typedef VectorType<std::string> StringVectorType; //!< Type definition for a vector of strings.
private:
::clang::ASTContext &_rCtx; //!< A reference to the current AST context.
ClangASTHelper(const ClangASTHelper &) = delete;
ClangASTHelper& operator=(const ClangASTHelper &) = delete;
public:
/** \brief Constructor.
* \param rAstContext A reference to the current AST context. */
ClangASTHelper(::clang::ASTContext &rAstContext) : _rCtx(rAstContext) {}
/** \brief Returns a reference to the current AST context. */
inline ::clang::ASTContext& GetASTContext() { return _rCtx; }
/** \brief Returns the corresponding array type for a qualified clang type.
* \param crElementType A reference to the qualified type whose array type shall be returned.
* \param cszDimension The dimension of the array. */
::clang::QualType GetConstantArrayType(const ::clang::QualType &crElementType, const size_t cszDimension);
/** \brief Returns the corresponding pointer type for a qualified clang type.
* \param crPointeeType A reference to the qualified type whose pointer type shall be returned. */
inline ::clang::QualType GetPointerType(const ::clang::QualType &crPointeeType) { return GetASTContext().getPointerType(crPointeeType); }
/** \name AST node creation methods */
//@{
/** \brief Creates an subscript expression.
* \param pArrayRef A pointer to the expression which represents the array.
* \param pIndexExpression A pointer to the expression object, which returns the index of the subscript.
* \param crReturnType The return type of the array subscript.
* \param bIsLValue Specifies, whether the array subscript expression is used as a L-value of another expression. */
::clang::ArraySubscriptExpr* CreateArraySubscriptExpression(::clang::Expr *pArrayRef, ::clang::Expr *pIndexExpression, const ::clang::QualType &crReturnType, bool bIsLValue = false);
/** \brief Creates a binary operator object of a specified type.
* \param pLhs A pointer to the expression object, which shall be on the left-hand-side.
* \param pRhs A pointer to the expression object, which shall be on the right-hand-side.
* \param eOperatorKind The type of the binary operator.
* \param crReturnType The return type of the operator expression. */
::clang::BinaryOperator* CreateBinaryOperator(::clang::Expr *pLhs, ::clang::Expr *pRhs, ::clang::BinaryOperatorKind eOperatorKind, const ::clang::QualType &crReturnType);
/** \brief Creates a binary operator object which represents the "comma" operator.
* \param pLhs A pointer to the expression object, which shall be on the left-hand-side.
* \param pRhs A pointer to the expression object, which shall be on the right-hand-side. */
::clang::BinaryOperator* CreateBinaryOperatorComma(::clang::Expr *pLhs, ::clang::Expr *pRhs);
/** \brief Creates a binary operator object which represents a "less than" comparison.
* \param pLhs A pointer to the expression object, which shall be on the left-hand-side.
* \param pRhs A pointer to the expression object, which shall be on the right-hand-side. */
::clang::BinaryOperator* CreateBinaryOperatorLessThan(::clang::Expr *pLhs, ::clang::Expr *pRhs);
/** \brief Creates a bool literal expression (i.e. a compile time constant).
* \param bValue The value of the bool literal. */
::clang::CXXBoolLiteralExpr* CreateBoolLiteral(bool bValue);
/** \brief Creates a <b>break</b> statement. */
::clang::BreakStmt* CreateBreakStatement();
/** \brief Wraps a statement object into a compound statement object.
* \param pStatement A pointer to the statement object, which shall be encapsulated into an compound statement. */
::clang::CompoundStmt* CreateCompoundStatement(::clang::Stmt *pStatement);
/** \brief Constructs a compound statement object around a vector of statement objects.
* \param crvecStatements A reference to the statement vector. */
::clang::CompoundStmt* CreateCompoundStatement(const StatementVectorType &crvecStatements);
/** \brief Constructs a conditional operator expression object (i.e. the "<cond> ? <expr_1> : <expr_2>" operator).
* \param pCondition A pointer to the expression object, which represents the condition.
* \param pThenExpr A pointer to the expression object, which will be returned when the condition is evaluated to <b>true</b>.
* \param pElseExpr A pointer to the expression object, which will be returned when the condition is evaluated to <b>false</b>.
* \param crReturnType The return type of the operator expression. */
::clang::ConditionalOperator* CreateConditionalOperator(::clang::Expr *pCondition, ::clang::Expr *pThenExpr, ::clang::Expr *pElseExpr, const ::clang::QualType &crReturnType);
/** \brief Creates a <b>continue</b> statement. */
::clang::ContinueStmt* CreateContinueStatement();
/** \brief Constructs a declaration reference expression which points to a specific declaration.
* \param pValueDecl A pointer to the value declaration object. */
::clang::DeclRefExpr* CreateDeclarationReferenceExpression(::clang::ValueDecl *pValueDecl);
/** \brief Constructs a declaration statement for a specific declaration.
* \param pDeclRef A pointer to a declaration reference expression object which points to the specific declaration. */
::clang::DeclStmt* CreateDeclarationStatement(::clang::DeclRefExpr *pDeclRef);
/** \brief Constructs a declaration statement for a specific declaration.
* \param pValueDecl A pointer to the value declaration object. */
::clang::DeclStmt* CreateDeclarationStatement(::clang::ValueDecl *pValueDecl);
/** \brief Creates a floating point literal expression (i.e. a compile time constant).
* \tparam ValueType The value type of the floating point literal (must be <b>float</b> or <b>double</b>).
* \param TValue The value of the floating point literal. */
template <typename ValueType>
::clang::FloatingLiteral* CreateFloatingLiteral(ValueType TValue)
{
static_assert( ! std::numeric_limits<ValueType>::is_integer, "The value type of a floating point literal cannot be of an integer type!" );
return ASTNode::createFloatingLiteral(GetASTContext(), TValue);
}
/** \brief Constructs a function call expression.
* \param pFunctionDecl A pointer to the function declaration which the constructed call shall point to.
* \param crvecArguments A vector containing the argument expressions for the function call. */
::clang::CallExpr* CreateFunctionCall(::clang::FunctionDecl *pFunctionDecl, const ExpressionVectorType &crvecArguments);
/** \brief Constructs a function declaration statement.
* \param strFunctionName The desired name of the newly declared function.
* \param crReturnType The qualified return type of the function.
* \param crvecArgumentNames A vector containing the names of the function arguments.
* \param crvecArgumentTypes A vector containing the qualified types of the function arguments. */
::clang::FunctionDecl* CreateFunctionDeclaration(std::string strFunctionName, const ::clang::QualType &crReturnType, const StringVectorType &crvecArgumentNames, const QualTypeVectorType &crvecArgumentTypes);
/** \brief Constructs an <b>"if-then-else"</b>-statement.
* \param pCondition A pointer to the condition expression of the <b>if</b>-branch.
* \param pThenBranch A pointer to the body statement of the <b>if</b>-branch.
* \param pElseBranch A pointer to the body statement of the <b>else</b>-branch. If set to <b>nullptr</b>, no <b>else</b>-branch will be created. */
::clang::IfStmt* CreateIfStatement(::clang::Expr *pCondition, ::clang::Stmt *pThenBranch, ::clang::Stmt *pElseBranch = nullptr);
/** \brief Constructs a multi-branch <b>if</b>-statement (i.e. a <b>"if-{else if}-else"</b>-statement).
* \param crvecConditions A vector containing the conditions of all <b>if / else if</b> branches.
* \param crvecBranchBodies A vector containing the body statements of all conditional branches.
* \param pDefaultBranch A pointer to the body statement of the final <b>else</b>-branch. If set to <b>nullptr</b>, no <b>else</b>-branch will be created.
* \remarks The number of conditions must be equal to the number of branch bodies. */
::clang::IfStmt* CreateIfStatement(const ExpressionVectorType &crvecConditions, const StatementVectorType &crvecBranchBodies, ::clang::Stmt *pDefaultBranch = nullptr);
/** \brief Creates an implicit cast expression object.
* \param pOperandExpression A pointer to the expression object whose return type shall be implicitly casted.
* \param crReturnType The qualified return type of the cast.
* \param eCastKind The internal kind of the cast.
* \param bIsLValue Specifies, whether the implicit cast expression is used as a L-value of another expression. */
::clang::ImplicitCastExpr* CreateImplicitCastExpression(::clang::Expr *pOperandExpression, const ::clang::QualType &crReturnType, ::clang::CastKind eCastKind, bool bIsLValue = false);
/** \brief Constructs an init list expression object around a vector of expressions.
* \param crvecExpressions A reference to the expression vector. */
::clang::InitListExpr* CreateInitListExpression(const ExpressionVectorType &crvecExpressions);
/** \brief Creates an integer literal expression (i.e. a compile time constant).
* \tparam ValueType The value type of the integer literal (must be integral).
* \param TValue The value of the integer literal. */
template <typename ValueType>
::clang::IntegerLiteral* CreateIntegerLiteral(ValueType TValue)
{
// Reject non-integral value types at compile time; floating point values
// must go through CreateFloatingLiteral() / CreateLiteral() instead.
static_assert( std::numeric_limits<ValueType>::is_integer, "The value type of an integer literal must be of an integer type!" );
// Delegate the actual AST node construction to the ASTNode helper,
// using the encapsulated AST context of this helper instance.
return ASTNode::createIntegerLiteral(GetASTContext(), TValue);
}
/** \brief Creates a literal expression (i.e. a compile time constant).
* \tparam ValueType The value type of the literal.
* \param TValue The value of the literal.
* \remarks Depending on the value type, this function construct a bool, integer or floating point literal. */
template <typename ValueType>
::clang::Expr* CreateLiteral(ValueType TValue);
/** \brief Creates a <b>do-while</b>-loop statement.
* \param pCondition The condition expression of the loop.
* \param pBody The statement which represents the loop body. */
::clang::DoStmt* CreateLoopDoWhile(::clang::Expr *pCondition, ::clang::Stmt *pBody);
/** \brief Creates a <b>for</b>-loop statement.
* \param pCondition The condition expression of the loop.
* \param pBody The statement which represents the loop body.
* \param pInitializer The initializer statement of the for-loop (can be <b>NULL</b>).
* \param pIncrement The increment expression of the for-loop, i.e. the expression which will be evaluated after each iteration (can be <b>NULL</b>). */
::clang::ForStmt* CreateLoopFor(::clang::Expr *pCondition, ::clang::Stmt *pBody, ::clang::Stmt *pInitializer = nullptr, ::clang::Expr *pIncrement = nullptr);
/** \brief Creates a <b>while</b>-loop statement.
* \param pCondition The condition expression of the loop.
* \param pBody The statement which represents the loop body. */
::clang::WhileStmt* CreateLoopWhile(::clang::Expr *pCondition, ::clang::Stmt *pBody);
/** \brief Creates a <b>#pragma omp parallel for</b> directive.
* \param pLoop The for-loop statement associated to this directive.
* \param nChunkSize The chunk size of the optional schedule clause. */
::clang::Stmt* CreateOpenMPDirectiveParallelFor(::clang::ForStmt* pLoop, int nChunkSize=1);
/** \brief Creates a parenthesis expression around another expression.
* \param pSubExpression A pointer to the expression object which shall be encapsulated into a parenthesis expression. */
::clang::ParenExpr* CreateParenthesisExpression(::clang::Expr *pSubExpression);
/** \brief Constructs a post increment statement for a declaration reference expression object.
* \param pDeclRef A pointer to the declaration reference expression, which shall be used in the post increment operator. */
::clang::UnaryOperator* CreatePostIncrementOperator(::clang::DeclRefExpr *pDeclRef);
/** \brief Creates a reinterpret cast expression object.
* \param pOperandExpression A pointer to the expression object whose return type shall be implicitly casted.
* \param crReturnType The qualified return type of the cast.
* \param eCastKind The internal kind of the cast.
* \param bIsLValue Specifies, whether the reinterpret cast expression is used as a L-value of another expression. */
::clang::CXXReinterpretCastExpr* CreateReinterpretCast(::clang::Expr *pOperandExpression, const ::clang::QualType &crReturnType, ::clang::CastKind eCastKind, bool bIsLValue = false);
/** \brief Creates a <b>return</b> statement.
* \param pReturnValue A pointer to an expression object whose result shall be returned by the <b>return</b> statement (if set to <b>nullptr</b>, nothing will be returned). */
::clang::ReturnStmt* CreateReturnStatement(::clang::Expr *pReturnValue = nullptr);
/** \brief Creates a static cast expression object.
* \param pOperandExpression A pointer to the expression object whose return type shall be implicitly casted.
* \param crReturnType The qualified return type of the cast.
* \param eCastKind The internal kind of the cast.
* \param bIsLValue Specifies, whether the static cast expression is used as a L-value of another expression. */
::clang::CXXStaticCastExpr* CreateStaticCast(::clang::Expr *pOperandExpression, const ::clang::QualType &crReturnType, ::clang::CastKind eCastKind, bool bIsLValue = false);
/** \brief Creates a string literal expression (i.e. a constant C-string).
* \param strValue The value of the string literal. */
::clang::StringLiteral* CreateStringLiteral(std::string strValue);
/** \brief Creates an unary operator object of a specified type.
* \param pSubExpression A pointer to the expression object, which shall be the sub-expression of the operator.
* \param eOperatorKind The type of the unary operator.
* \param crResultType The return type of the operator expression. */
::clang::UnaryOperator* CreateUnaryOperator(::clang::Expr *pSubExpression, ::clang::UnaryOperatorKind eOperatorKind, const ::clang::QualType &crResultType);
/** \brief Creates a new variable declaration object.
* \param pDeclContext A pointer to the declaration context which the new variable shall be declared in.
* \param crstrVariableName The name of the newly declared variable.
* \param crVariableType The qualified type of newly declared variable.
* \param pInitExpression A pointer to the initialization expression object for the variable declaration (i.e. the R-value of the assignment).
* \remarks The created variable declaration is automatically added to the declaration context of the specified function declaration. */
::clang::VarDecl* CreateVariableDeclaration(::clang::DeclContext *pDeclContext, const std::string &crstrVariableName, const ::clang::QualType &crVariableType, ::clang::Expr *pInitExpression);
/** \brief Creates a new variable declaration object.
* \param pParentFunction A pointer to the function declaration object in whose context the new variable shall be declared.
* \param crstrVariableName The name of the newly declared variable.
* \param crVariableType The qualified type of newly declared variable.
* \param pInitExpression A pointer to the initialization expression object for the variable declaration (i.e. the R-value of the assignment).
* \remarks The created variable declaration is automatically added to the declaration context of the specified function declaration. */
::clang::VarDecl* CreateVariableDeclaration(::clang::FunctionDecl *pParentFunction, const std::string &crstrVariableName, const ::clang::QualType &crVariableType, ::clang::Expr *pInitExpression);
//@}
public:
/** \brief Checks, whether all function declaration objects in a function declaration vector have the same signature.
* \param crvecFunctionDecls A vector containing all function declaration objects whose signature shall be compared. */
static bool AreSignaturesEqual(const FunctionDeclarationVectorType &crvecFunctionDecls);
/** \brief Counts the number of declaration references to a specific declaration inside a statement tree.
* \param pStatement A pointer to the root of the statement tree which shall be parsed for the specified declaration references.
* \param crstrReferenceName The name of the declaration reference whose appearances shall be counted. */
static unsigned int CountNumberOfReferences(::clang::Stmt *pStatement, const std::string &crstrReferenceName);
/** \brief Looks up a specific declaration.
* \param pFunction A pointer to the function declaration object whose declaration context will be searched for the specified declaration.
* \param crstrDeclName The name of the declaration which shall be searched for.
* \return If successful, a pointer to a newly created declaration reference expression for the found declaration, and zero otherwise. */
::clang::DeclRefExpr* FindDeclaration(::clang::FunctionDecl *pFunction, const std::string &crstrDeclName);
/** \brief Returns the fully qualified name of a function declaration, i.e. the function name with all preceding namespace names.
* \param pFunctionDecl A pointer to the function declaration object, whose fully qualified name shall be retrieved. */
static std::string GetFullyQualifiedFunctionName(::clang::FunctionDecl *pFunctionDecl);
/** \brief Returns a vector of all known function declarations in the encapsulated AST context.
* \remarks This method parses all namespaces, beginning with the global namespace. */
FunctionDeclarationVectorType GetKnownFunctionDeclarations();
/** \brief Returns a vector of all known function declarations inside of a declaration context.
* \param pDeclContextRoot A pointer to the declaration context object which shall be parsed for function declarations.
* \remarks This method also parses all child declaration contexts of the specified root declaration context. */
FunctionDeclarationVectorType GetFunctionDeclarationsFromContext(::clang::DeclContext *pDeclContextRoot);
/** \brief Checks whether a statement tree has only one branch (i.e. none of its nodes has more than one child).
* \param pStatement A pointer to the root of the statement tree. */
static bool IsSingleBranchStatement(::clang::Stmt *pStatement);
/** \brief Replaces <b>all</b> instances of a declaration reference in a statement tree by a new value declaration.
* \param pStatement A pointer to the root of the statement tree which shall be parsed for the specified declaration references.
* \param crstrDeclRefName The name of the declaration reference which shall be replaced.
* \param pNewDecl A pointer to the value declaration to which all reference will be updated. */
static void ReplaceDeclarationReferences(::clang::Stmt* pStatement, const std::string &crstrDeclRefName, ::clang::ValueDecl *pNewDecl);
/** \brief Determines whether a pointer is referencing to a type marked by the const qualifier.
* \param crPointer The qualified type of the pointer. */
bool IsPointerToConstType(const QualType& crPointer);
};
// Template function specializations
// CreateLiteral() dispatches on the value type of its argument:
//   bool                 -> CreateBoolLiteral()
//   integral types       -> CreateIntegerLiteral()
//   float / double       -> CreateFloatingLiteral()
//   std::string          -> CreateStringLiteral()
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(bool TValue) { return CreateBoolLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(int8_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(uint8_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(int16_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(uint16_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(int32_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(uint32_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(int64_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(uint64_t TValue) { return CreateIntegerLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(float TValue) { return CreateFloatingLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(double TValue) { return CreateFloatingLiteral(TValue); }
template<> inline ::clang::Expr* ClangASTHelper::CreateLiteral(std::string TValue) { return CreateStringLiteral(TValue); }
} // end namespace Backend
} // end namespace hipacc
} // end namespace clang
#endif // _BACKEND_CLANG_AST_HELPER_H_
// vim: set ts=2 sw=2 sts=2 et ai:
|
GB_binop__ne_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int32)
// A*D function (colscale): GB (_AxD__ne_int32)
// D*A function (rowscale): GB (_DxB__ne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int32)
// C=scalar+B GB (_bind1st__ne_int32)
// C=scalar+B' GB (_bind1st_tran__ne_int32)
// C=A+scalar GB (_bind2nd__ne_int32)
// C=A'+scalar GB (_bind2nd_tran__ne_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// FIX: removed the stray trailing '\' after the 0 -- with blank lines
// collapsed, the line continuation spliced the next comment line into
// the macro body.
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT32 || GxB_NO_NE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// C += A+B where all three matrices are dense.  This variant is only
// generated for the accumulator operators listed below; NE is not one
// of them, so the whole definition is compiled out via '#if 0'.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  The actual loop lives
// in the included template, specialized by the GB_* macros above
// (GB_BINOP applies cij = (aij != bij)).
void GB (_Cdense_ewise3_noaccum__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For NE the
// accumulation kernel itself is compiled out ('#if 0' below), so this
// reports success without touching C -- unless the operator has been
// disabled at compile time, in which case GrB_NO_VALUE is returned.
GrB_Info GB (_Cdense_accumB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with the matrix
// accumulation above, the kernel is compiled out ('#if 0') for the NE
// operator, so this reports success without modifying C.
GrB_Info GB (_Cdense_accumb__ne_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, where D is a diagonal matrix.  Each entry of
// C is cij = (aij != djj); the loop lives in the included template.
GrB_Info GB (_AxD__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values (the NE operator returns bool)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, where D is a diagonal matrix.  Each entry of C
// is cij = (dii != bij); the loop lives in the included template.
GrB_Info GB (_DxB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool values (the NE operator returns bool)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (set union), with an optional mask M that may be
// complemented and/or structural.  The traversal is driven by the
// pre-computed task list; the loop lives in GB_add_template.c.
GrB_Info GB (_AaddB__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A and B; released by GB_FREE_WORKSPACE
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// typed copies of the alpha/beta scalars; only eWiseUnion reads them
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (set intersection) with an optional
// mask, where C is sparse or hypersparse.  The traversal is driven by
// the pre-computed task list; the loop lives in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for NE (x != y is commutative), so
// only the unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where the mask M is sparse/hyper
// and both A and B are bitmap/full.  The loop lives in the included
// template, sliced over M by the M_ek_slicing workspace.
GrB_Info GB (_AemultB_04__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked and/or complemented) where C
// is held as a bitmap; the loop lives in the included template.
GrB_Info GB (_AemultB_bitmap__ne_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B, with the scalar
// x bound as the first operand.  Entries absent from the bitmap Bb (if
// any) are skipped.
GrB_Info GB (_bind1st__ne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    bool *Cx = (bool *) Cx_output ;
    const int32_t xval = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap contribute
        if (GBB (Bb, k))
        {
            const int32_t bval = GBX (Bx, k, false) ;
            Cx [k] = (xval != bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A, with the scalar
// y bound as the second operand.  Entries absent from the bitmap Ab
// (if any) are skipped.
GrB_Info GB (_bind2nd__ne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    const int32_t yval = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap contribute
        if (GBB (Ab, k))
        {
            const int32_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval != yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the bound
// first argument x.  The traversal lives in GB_unop_transpose.c, which
// invokes GB_CAST_OP (defined just above) for every entry.
GrB_Info GB (_bind1st_tran__ne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar x (first operand of cij = x != aij)
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish the file-wide GB_ATYPE definition for any later code
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with the bound
// second argument y.  The traversal lives in GB_unop_transpose.c,
// which invokes GB_CAST_OP (defined just above) for every entry.
GrB_Info GB (_bind2nd_tran__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar y (second operand of cij = aij != y)
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matmul_openmp.c | /*
* matmul_openmp.c
* Matrix multiplication using OpenMP
* Author : Thomas Sunghoon Heo
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>   /* clock_t, used by the timing globals below */

#include <omp.h>

#include "utils.h"
typedef struct matrix_s
{
int rows;
int cols;
int stride;
double *elements;
}matrix_t;
// execution time check
clock_t start, end;
const int ROWSA = 4096;
const int COLSA = 4096;
const int ROWSB = 4096;
const int COLSB = 4096;
int NUM_THREADS = 4;
matrix_t *init_matrix(int rows, int cols)
{
matrix_t *matrix = (matrix_t *)malloc(sizeof(matrix_t));
size_t allocs = rows * cols * sizeof(double);
matrix->elements = (double *)malloc(allocs);
int i;
for(i = 0; i < rows * cols; i++)
{
matrix->elements[i] = sin((double)i) + cos((double)(i+1));
}
matrix->rows = rows;
matrix->cols = cols;
matrix->stride = cols;
return matrix;
}
/*
 * C = A * B using OpenMP.
 *
 * FIX: the previous version gave thread 'tid' only the block
 * [tid-th row chunk] x [tid-th column chunk], so just the diagonal
 * blocks of C were ever computed, and remainder rows/columns were
 * dropped whenever the dimensions were not divisible by the thread
 * count.  Now the rows of C are partitioned across the threads and
 * every thread computes ALL columns of its row range, so the whole of
 * C is produced for any matrix size / thread count.
 */
void matmul_omp_kernel(matrix_t *A, matrix_t *B, matrix_t *C)
{
    NUM_THREADS = omp_get_max_threads();
    // omp_set_num_threads(NUM_THREADS);
    double st, ed;
    st = omp_get_wtime();
    #pragma omp parallel num_threads(NUM_THREADS)
    {
        int tid = omp_get_thread_num();
        int nthreads = omp_get_num_threads();
        /* ceiling division so the last chunk covers any remainder rows */
        int row_chunk = (A->rows + nthreads - 1) / nthreads;
        int row_start = tid * row_chunk;
        int row_end = row_start + row_chunk;
        if (row_end > A->rows) row_end = A->rows;
        printf("[Thread -%d] Work start rows [%d,%d)\n", tid, row_start, row_end);
        int i, j, k;
        double sum, a_ik, b_kj;
        for (i = row_start; i < row_end; i++)
        {
            for (j = 0; j < B->cols; j++)   /* all columns, not a diagonal block */
            {
                sum = 0.0;
                for (k = 0; k < B->rows; k++)
                {
                    a_ik = matrix_at(A->elements, i, k, A->stride);
                    b_kj = matrix_at(B->elements, k, j, B->stride);
                    sum += (a_ik * b_kj);
                }
                set_elem_at(C->elements, i, j, C->stride, sum);
            }
        }
    } // end of parallel section
    ed = omp_get_wtime() - st;
    printf("%lf sec elapsed\n", ed);
}
/*
 * Print the top-left (up to) 10x10 corner of a matrix.
 * FIX: clamps to the actual dimensions so matrices smaller than 10x10
 * no longer cause out-of-bounds reads.
 */
void print_10x10(matrix_t *m)
{
    int nrows = m->rows < 10 ? m->rows : 10;
    int ncols = m->cols < 10 ? m->cols : 10;
    int i, j;
    for (i = 0; i < nrows; i++)
    {
        for (j = 0; j < ncols; j++)
        {
            printf("%.2lf ", m->elements[i * m->cols + j]);
        }
        printf("\n");
    }
}
/*
 * Driver: build A (ROWSA x COLSA), B (ROWSB x COLSB) and the result
 * C (ROWSA x COLSB), multiply them with the OpenMP kernel, print the
 * top-left corner of C, and release everything.
 */
int main()
{
    matrix_t *A, *B, *C;
    A = init_matrix(ROWSA, COLSA);
    B = init_matrix(ROWSB, COLSB);
    C = init_matrix(ROWSA, COLSB);  /* result shape: rows of A x cols of B */
    printf("matrix multiplication using OpenMP\n");
    int max_threads = omp_get_max_threads();
    printf("maximum %d threads are available\n", max_threads);
    matmul_omp_kernel(A, B, C);
    print_10x10(C);
    free(A->elements); free(B->elements); free(C->elements);
    free(A); free(B); free(C);
    return 0;  /* explicit success status (was missing) */
}
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/MagickCore.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Typedef declarations.
*/
/*
  An ImageView binds a rectangular region of an image to a pixel cache
  view so the region can be processed scanline-by-scanline.
*/
struct _ImageView
{
// user-visible description (owned: destroyed in DestroyImageView)
char
*description;
// the rectangular region of the image this view covers
RectangleInfo
extent;
// the image being viewed (not owned: DestroyImageView leaves it alone)
Image
*image;
// pixel cache view used to read/write pixels of the region (owned)
CacheView
*view;
// exception sink for cache operations on this view (owned)
ExceptionInfo
*exception;
// debug flag -- presumably mirrors the image's debug setting; verify
// against NewImageView (not visible in this chunk)
MagickBooleanType
debug;
// set to MagickCoreSignature while the struct is valid; inverted on
// destruction so stale pointers fail the asserts in the methods below
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    FIX: copy the (non-owning) image pointer.  The original left it at
    the NULL written by memset(), so any use of the clone that touched
    its image would dereference NULL.  DestroyImageView does not free
    the image, so sharing the pointer is safe.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with a image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
// the description is optional, so release it only when set
if (image_view->description != (char *) NULL)
image_view->description=DestroyString(image_view->description);
// release the owned cache view and exception; the viewed image itself
// is NOT owned by the view and is deliberately left untouched
image_view->view=DestroyCacheView(image_view->view);
image_view->exception=DestroyExceptionInfo(image_view->exception);
// invalidate the signature before freeing, so a stale pointer passed
// back into this module trips the assertions above
image_view->signature=(~MagickCoreSignature);
image_view=(ImageView *) RelinquishMagickMemory(image_view);
// return the relinquished pointer so callers can clear their handle
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination image view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
  ImageView *source,ImageView *duplex,ImageView *destination,
  DuplexTransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* scanlines completed; fed to the progress monitor */

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Validate arguments.  NOTE(review): 'duplex' and 'destination' are
    dereferenced below without the NULL/signature asserts applied to
    'source' -- confirm callers guarantee their validity.
  */
  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (DuplexTransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /*
    The destination is written to, so promote it to DirectClass storage.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* a failure on any thread cancels the remaining scanlines */
    if (status == MagickFalse)
      continue;
    /*
      Source and duplex rows are fetched as virtual pixels (their extents
      may lie outside the canvas); the destination row must be authentic.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* push callback updates back to the pixel cache */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_DuplexTransferImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticMetacontent() returns the image view authentic
% meta-content.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% void *GetImageViewAuthenticMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport void *GetImageViewAuthenticMetacontent(
  const ImageView *image_view)
{
  void
    *metacontent;

  /*
    Return the authentic meta-content of the view's pixel cache.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  metacontent=GetCacheViewAuthenticMetacontent(image_view->view);
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Quantum *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  Quantum
    *pixels;

  /*
    Return the authentic pixel queue of the view's pixel cache.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  pixels=GetCacheViewAuthenticPixelQueue(image_view->view);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const PixelImage *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the pixel image_view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /*
    The caller owns the returned string.  2*MagickPathExtent leaves room
    for the localized reason plus the parenthesized description appended
    below.
  */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MagickPathExtent);
  /* append " (<localized description>)" when one is present */
  if (image_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  RectangleInfo
    extent;

  /*
    Return a copy of the view's extent rectangle.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  extent=image_view->extent;
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% MagickCore *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  Image
    *image;

  /*
    Return the image this view was created on (not a copy).
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  image=image_view->image;
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* scanlines completed; fed to the progress monitor */

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    /* a failure on any thread cancels the remaining scanlines */
    if (status == MagickFalse)
      continue;
    /*
      Virtual pixels: the requested row may extend beyond the canvas.
      The read-only callback receives no pixel pointer; this call primes
      the cache view the callback queries itself.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualMetacontent() returns the image view virtual
% meta-content.
%
% The format of the GetImageViewVirtualMetacontent method is:
%
% const void *GetImageViewVirtualMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
  const ImageView *image_view)
{
  const void
    *metacontent;

  /*
    Return the virtual (read-only) meta-content of the view's pixel cache.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  metacontent=GetCacheViewVirtualMetacontent(image_view->view);
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  const Quantum
    *pixels;

  /*
    Return the virtual (read-only) pixel queue of the view's pixel cache.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  pixels=GetCacheViewVirtualPixelQueue(image_view->view);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the the parameter is verified as a image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  /*
    A valid image view is non-NULL and carries the MagickCore signature;
    short-circuit evaluation keeps the NULL check before the dereference.
  */
  if ((image_view != (const ImageView *) NULL) &&
      (image_view->signature == MagickCoreSignature))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns a image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Allocate and zero the view structure, then bind it to the image.
  */
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  /*
    The default extent covers the entire image canvas.
  */
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns a image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,
  ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /*
    Fix: bind the image BEFORE acquiring the cache view.  The previous
    order passed the still-NULL image_view->image (the struct was just
    zeroed) to AcquireVirtualCacheView().  NewImageView() already uses
    this corrected order.
  */
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  /*
    The extent is the caller-supplied region rather than the full canvas.
  */
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /*
    Fix: release any previous description to avoid leaking the string
    installed by NewImageView()/NewImageViewRegion() (ConstantString
    allocates) or by an earlier call to this method.
  */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* scanlines completed; fed to the progress monitor */

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /*
    Pixels are written, so the image must use DirectClass storage.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* a failure on any thread cancels the remaining scanlines */
    if (status == MagickFalse)
      continue;
    /*
      Authentic pixels: the extent must lie within the image canvas.
    */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* push the callback's pixel settings back to the cache */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SetImageViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination image view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
  ImageView *destination,TransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* scanlines completed; fed to the progress monitor */

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): 'destination' is dereferenced below without the
    NULL/signature asserts applied to 'source' -- confirm callers
    guarantee its validity.
  */
  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (TransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /*
    The destination is written to, so promote it to DirectClass storage.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* a failure on any thread cancels the remaining scanlines */
    if (status == MagickFalse)
      continue;
    /*
      Source rows are virtual (extent may exceed the canvas); destination
      rows are authentic and therefore canvas-confined.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* push callback updates back to the pixel cache */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransferImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* scanlines completed; fed to the progress monitor */

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /*
    Pixels are written, so the image must use DirectClass storage.
  */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* a failure on any thread cancels the remaining scanlines */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Fix: the previous code assigned the SyncCacheViewAuthenticPixels()
      result directly to 'status', silently discarding a failure reported
      by the update callback whenever the sync itself succeeded (the
      following no-op check could never restore it).  Use a local 'sync'
      flag, mirroring the other view iterators in this file.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
FeatureLPPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/FeatureLPPooling.c"
#else
#ifndef FEATURE_LP_DEFS
#define FEATURE_LP_DEFS

/* Loop-index type: signed int64_t under MSVC (presumably for MSVC's
   OpenMP loop-index rules -- TODO confirm), size_t elsewhere. */
#ifdef _MSC_VER
#define FEATURE_LP_SIZE_TYPE int64_t
#define FEATURE_LP_CAST_TYPE (int64_t)
#else
#define FEATURE_LP_SIZE_TYPE size_t
#define FEATURE_LP_CAST_TYPE
#endif

/* Canonical 4d view of a tensor: [batch][feature][opt dim 1][opt dim 2]. */
typedef struct {
size_t size[4];
size_t stride[4];
} FeatureLPPoolingSizes;

/* Linearize a [batch][feature][opt1][opt2] coordinate using the upcast
   strides. */
static inline size_t flpGetOffset(FeatureLPPoolingSizes* s,
                                  FEATURE_LP_SIZE_TYPE batch,
                                  FEATURE_LP_SIZE_TYPE feature,
                                  FEATURE_LP_SIZE_TYPE opt1,
                                  FEATURE_LP_SIZE_TYPE opt2) {
  size_t offset = s->stride[0] * batch;
  offset += s->stride[1] * feature;
  offset += s->stride[2] * opt1;
  offset += s->stride[3] * opt2;
  return offset;
}

/* Number of pooling windows of `width`, stepped by `stride`, that fit in
   `inputSize`. */
static inline size_t flpOutputSize(FEATURE_LP_SIZE_TYPE inputSize,
                                   FEATURE_LP_SIZE_TYPE width,
                                   FEATURE_LP_SIZE_TYPE stride) {
  return (size_t) (((inputSize - width) / stride) + 1);
}
#endif // FEATURE_LP_DEFS
FeatureLPPoolingSizes
THNN_(FeatureLPPooling_upcastCPU)(THTensor* t, bool batchMode) {
  /*
    Upcast a 1-4d tensor to the canonical [batch][feature][opt 1][opt 2]
    layout; dimensions the tensor does not have get size/stride 1.
  */
  FeatureLPPoolingSizes s;
  int i;
  for (i = 0; i < 4; ++i) {
    s.size[i] = 1;
    s.stride[i] = 1;
  }

  int dim = THTensor_(nDimension)(t);
  /* Without a batch the first real dimension is the feature dimension,
     which lives in slot 1 of the upcast layout. */
  int slot = batchMode ? 0 : 1;

  /* A 1d tensor can only be [feature]; a 4d tensor must carry a batch. */
  if (dim == 1) {
    THAssert(!batchMode);
  } else if (dim == 4) {
    THAssert(batchMode);
  }

  if (dim >= 1 && dim <= 4) {
    /* Copy each real dimension into its slot; the bound on slot + i only
       guards the already-asserted-invalid 4d non-batch case. */
    for (i = 0; i < dim && slot + i < 4; ++i) {
      s.size[slot + i] = THTensor_(size)(t, i);
      s.stride[slot + i] = THTensor_(stride)(t, i);
    }
  }

  return s;
}
void
THNN_(FeatureLPPooling_resizeForOutputCPU)(THTensor* toResize,
                                           THTensor* input,
                                           bool batchMode,
                                           int width,
                                           int stride) {
  int inputDim = THTensor_(nDimension)(input);
  THAssert(inputDim >= 1 && inputDim <= 4);

  // Pooled length of the feature dimension (dim 0 without a batch).
  // NOTE(review): in batch mode this first value reads dim 0 (the batch
  // dimension) and is immediately recomputed from dim 1 below; redundant
  // but harmless in that case -- confirm before simplifying.
  int64_t outSize =
    flpOutputSize(THTensor_(size)(input, 0), width, stride);
  if (batchMode) {
    THAssert(inputDim > 1);
    // With a batch, features live in dim 1.
    outSize =
      flpOutputSize(THTensor_(size)(input, 1), width, stride);
  } else {
    THAssert(inputDim < 4);
  }

  // Resize to the input's shape with the feature dimension replaced by
  // the pooled size; every other dimension passes through unchanged.
  if (inputDim == 1) {
    THTensor_(resize1d)(toResize, outSize);
  } else if (inputDim == 2) {
    if (batchMode) {
      THTensor_(resize2d)(toResize,
                          THTensor_(size)(input, 0),
                          outSize);
    } else {
      THTensor_(resize2d)(toResize,
                          outSize,
                          THTensor_(size)(input, 1));
    }
  } else if (inputDim == 3) {
    if (batchMode) {
      THTensor_(resize3d)(toResize,
                          THTensor_(size)(input, 0), outSize,
                          THTensor_(size)(input, 2));
    } else {
      THTensor_(resize3d)(toResize,
                          outSize, THTensor_(size)(input, 1),
                          THTensor_(size)(input, 2));
    }
  } else if (inputDim == 4) {
    THTensor_(resize4d)(toResize,
                        THTensor_(size)(input, 0),
                        outSize,
                        THTensor_(size)(input, 2),
                        THTensor_(size)(input, 3));
  }
}
// Makes `toResize` the same size/dimensionality as `src`
void
THNN_(FeatureLPPooling_resizeCPU)(THTensor* toResize,
                                  THTensor* src) {
  int srcDim = THTensor_(nDimension)(src);
  THAssert(srcDim >= 1 && srcDim <= 4);

  // Dispatch on rank; each case forwards the source sizes verbatim.
  switch (srcDim) {
    case 1:
      THTensor_(resize1d)(toResize,
                          THTensor_(size)(src, 0));
      break;
    case 2:
      THTensor_(resize2d)(toResize,
                          THTensor_(size)(src, 0),
                          THTensor_(size)(src, 1));
      break;
    case 3:
      THTensor_(resize3d)(toResize,
                          THTensor_(size)(src, 0),
                          THTensor_(size)(src, 1),
                          THTensor_(size)(src, 2));
      break;
    case 4:
      THTensor_(resize4d)(toResize,
                          THTensor_(size)(src, 0),
                          THTensor_(size)(src, 1),
                          THTensor_(size)(src, 2),
                          THTensor_(size)(src, 3));
      break;
    default:
      break;  // unreachable: rank asserted above
  }
}
void
THNN_(FeatureLPPooling_updateOutput)(
  THNNState *state,
  THTensor *input,
  THTensor *output,
  accreal power,
  int width,
  int stride,
  bool batchMode) {
  int inputDim = THTensor_(nDimension)(input);

  if (batchMode) {
    THArgCheck(inputDim >= 2 && inputDim <= 4, 2,
               "input must be 2-4 dimensions for batch mode");
  } else {
    THArgCheck(inputDim >= 1 && inputDim <= 3, 2,
               "input must be 1-3 dimensions for non-batch mode");
  }

  FeatureLPPoolingSizes inputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(input, batchMode);

  // Make sure the feature dimension is properly sized
  THArgCheck(inputDesc.size[1] >= (FEATURE_LP_SIZE_TYPE) width, 3,
             "input: feature dimension must be >= width");

  // Make sure that width and stride are within range
  THArgCheck(width >= 2 && width <= 16, 5,
             "width must be between 2 - 16");
  THArgCheck(stride >= 1 && stride <= 4, 6,
             "stride must be between 1 - 4");

  // Resize output
  THNN_(FeatureLPPooling_resizeForOutputCPU)(
    output, input, batchMode, width, stride);

  FeatureLPPoolingSizes outputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(output, batchMode);

  real* inputP = THTensor_(data)(input);
  real* outputP = THTensor_(data)(output);

  FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i;

  // Fix: only `batch`, the index of the parallel loop itself, is
  // auto-privatized by OpenMP.  The inner indices are declared outside
  // the parallel region, so without an explicit private() clause they
  // were shared across threads -- a data race corrupting the loop
  // bounds under OpenMP builds.
#pragma omp parallel for private(opt1, opt2, outputFeature, i)
  for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) {
    for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) {
      for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) {
        for (outputFeature = 0;
             outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1]; ++outputFeature) {
          // Lp pooling: out = (sum_i x_i^p)^(1/p), summed over a window
          // of `width` input features starting at outputFeature * stride.
          accreal v = (accreal) 0;
          for (i = 0; i < (FEATURE_LP_SIZE_TYPE) width; ++i) {
            FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i;
            if (inputFeature >= FEATURE_LP_CAST_TYPE inputDesc.size[1]) {
              break;
            }

            v +=
              pow(inputP[flpGetOffset(&inputDesc,
                                      batch,
                                      inputFeature,
                                      opt1,
                                      opt2)], power);
          }

          outputP[flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)] =
            pow(v, (accreal) 1 / power);
        }
      }
    }
  }
}
// Backward pass of feature LP pooling.
// For each pooled output element f(x) = (sum_i x_i^p)^(1/p), the gradient
// contribution to each input x_i covered by that window is
//   gradOutput * (x_i / f(x))^(p - 1),
// accumulated over all windows that contain x_i (windows overlap when
// stride < width, hence the += accumulation into a zeroed gradInput).
void
THNN_(FeatureLPPooling_updateGradInput)(
  THNNState *state,
  THTensor* gradOutput,
  THTensor* input,
  THTensor* output,
  THTensor* gradInput,
  accreal power,
  int width,
  int stride,
  bool batchMode) {
  int inputDim = THTensor_(nDimension)(input);
  if (batchMode) {
    THArgCheck(inputDim >= 2 && inputDim <= 4, 3,
               "input must be 2-4 dimensions for batch mode");
  } else {
    THArgCheck(inputDim >= 1 && inputDim <= 3, 3,
               "input must be 1-3 dimensions for non-batch mode");
  }
  // Upcast all tensors to a canonical 4-d descriptor
  // (batch, feature, opt1, opt2) so the loops below are uniform.
  FeatureLPPoolingSizes inputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(input, batchMode);
  FeatureLPPoolingSizes gradOutputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(gradOutput, batchMode);
  FeatureLPPoolingSizes outputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(output, batchMode);
  // Make sure the feature dimension is properly sized
  THArgCheck(inputDesc.size[1] >= (FEATURE_LP_SIZE_TYPE) width, 3,
             "input: feature dimension must be >= width");
  // Make sure that width and stride are within range
  THArgCheck(width >= 2 && width <= 16, 7,
             "width must be between 2 - 16");
  THArgCheck(stride >= 1 && stride <= 4, 8,
             "stride must be between 1 - 4");
  for (int i = 0; i < 4; ++i) {
    THAssertMsg(outputDesc.size[i] == gradOutputDesc.size[i],
                "output and gradOutput sizes do not match");
  }
  // Make sure that the input sizes produce the output sizes
  THArgCheck(flpOutputSize(FEATURE_LP_CAST_TYPE inputDesc.size[1], width, stride) ==
             outputDesc.size[1], 3,
             "input and output sizes do not match with respect to "
             "width and stride");
  // Resize `gradInput` based on `input`
  THNN_(FeatureLPPooling_resizeCPU)(gradInput, input);
  // Zero gradInput for accumulation
  THTensor_(zero)(gradInput);
  FeatureLPPoolingSizes gradInputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(gradInput, batchMode);
  real* gradOutputP = THTensor_(data)(gradOutput);
  real* gradInputP = THTensor_(data)(gradInput);
  real* outputP = THTensor_(data)(output);
  real* inputP = THTensor_(data)(input);
  FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i;
  // `batch` is the parallel loop variable and therefore implicitly
  // private, but the inner counters are declared at function scope;
  // they must be listed explicitly or all threads would share (and
  // clobber) the same opt1/opt2/outputFeature/i, producing a data race
  // and wrong gradients. Accumulation into gradInput is safe because
  // each thread owns a distinct batch slice.
  #pragma omp parallel for private(opt1, opt2, outputFeature, i)
  for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) {
    for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) {
      for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) {
        for (outputFeature = 0;
             outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1]; ++outputFeature) {
          // Load output (f(x_is)). It is possible that this is zero, in
          // which case we'll ignore this point.
          real outputV =
            outputP[
              flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)];
          if (outputV == (real) 0) {
            continue;
          }
          for (i = 0; i < (FEATURE_LP_SIZE_TYPE) width; ++i) {
            FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i;
            THAssert(inputFeature < inputDesc.size[1]);
            real gradOutputV =
              gradOutputP[
                flpGetOffset(&gradOutputDesc, batch, outputFeature, opt1, opt2)];
            real inputV =
              inputP[
                flpGetOffset(&inputDesc, batch, inputFeature, opt1, opt2)];
            // Calculate grad * (x_i / f(x_is))^(p - 1)
            real v = gradOutputV * pow(inputV / outputV, power - (accreal) 1);
            gradInputP[
              flpGetOffset(&gradInputDesc, batch, inputFeature, opt1, opt2)]
              += v;
          }
        }
      }
    }
  }
}
#endif
|
declare_reduction_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix=CHECK-LOAD %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes -fopenmp-version=45
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes -fopenmp-version=45 | FileCheck --check-prefixes=CHECK-LOAD,OMP45-LOAD %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[SSS_INT:.+]] = type { i32 }
// CHECK-LOAD: [[SSS_INT:.+]] = type { i32 }
// CHECK-DAG: [[SSS_INIT:@.+]] = private constant %struct.SSS zeroinitializer
// CHECK-DAG: [[INT_INIT:@.+]] = private constant i32 0
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK-LOAD: sext i8
// CHECK-LOAD: sext i8
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig)
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK: [[ADD:%.+]] = fadd float
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK: [[ADD:%.+]] = fadd float 1.5
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// Aggregate used to exercise '#pragma omp declare reduction' with struct
// operands; the class-scope directive below verifies that declare
// reduction is accepted inside a struct definition and emits its own
// combiner functions (matched by the CHECK patterns).
struct SSS {
  int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
};
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LABEL: @main
// CHECK-LOAD-LABEL: @main
// Redeclares the 'fun' reduction for struct SSS at function scope and again
// in a nested block scope; each redeclaration must emit its own
// combiner/initializer pair (verified by the CHECK patterns below).
int main(void) {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
  {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
  }
  return 0;
}
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1)
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: store i32 [[MUL]], i32*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1)
// OMP45-LOAD: sext i8
// OMP45-LOAD: sext i8
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// OMP45-LOAD-NEXT: store i8 [[TRUNC]], i8*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// CHECK-LABEL: bar
struct SSS ss;
int in;
// Uses the '+' reductions declared below (for struct SSS and for int) on the
// global variables `ss` and `in` inside an orphaned OpenMP worksharing loop;
// the private copies must be initialized from the zero-initialized globals
// (checked via SSS_INIT/INT_INIT above).
void bar(void) {
// CHECK: [[SS_PRIV:%.+]] = alloca %struct.SSS,
// CHECK: [[IN_PRIV:%.+]] = alloca i32,
// CHECK: [[BC:%.+]] = bitcast %struct.SSS* [[SS_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{64|32}}(i8* {{.*}}[[BC]], i8* {{.*}}bitcast (%struct.SSS* [[SSS_INIT]] to i8*), i{{64|32}} 4, i1 false)
// CHECK: [[IN_VAL:%.+]] = load i32, i32* [[INT_INIT]],
// CHECK: store i32 [[IN_VAL]], i32* [[IN_PRIV]],
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp declare reduction(+ \
                              : struct SSS \
                              : omp_out = omp_in)
#pragma omp declare reduction(+ \
                              : int \
                              : omp_out = omp_in)
#pragma omp for reduction(+ \
                          : ss, in)
  for (int i = 0; i < 10; ++i)
    ;
}
#endif
|
DepositCircuit.h | #ifndef _DEPOSITCIRCUIT_H_
#define _DEPOSITCIRCUIT_H_
#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../ThirdParty/BigIntHeader.hpp"
#include "ethsnarks.hpp"
#include "utils.hpp"
#include "gadgets/sha256_many.hpp"
using namespace ethsnarks;
namespace Loopring
{
// Circuit gadget that applies a single on-chain deposit: credits `amount`
// of `tokenID` to `accountID` (capped at constants.maxAmount), installs the
// submitted EdDSA public key, and produces the updated accounts Merkle root.
class DepositGadget : public GadgetT
{
public:
    const Constants& constants;

    // User state (leaf contents before the deposit is applied)
    BalanceGadget balanceBefore;
    AccountGadget accountBefore;

    // Inputs
    DualVariableGadget accountID;
    DualVariableGadget tokenID;
    DualVariableGadget amount;
    DualVariableGadget publicKeyX;
    DualVariableGadget publicKeyY;

    // Calculate the new balance
    UnsafeAddGadget uncappedBalanceAfter;
    MinGadget balanceAfter;

    // Update User
    UpdateBalanceGadget updateBalance;
    UpdateAccountGadget updateAccount;

    // `root` is the accounts Merkle root before this deposit; the gadget
    // chains off it so deposits can be applied sequentially.
    DepositGadget(
        ProtoboardT& pb,
        const Constants& _constants,
        const VariableT& root,
        const std::string& prefix
    ) :
        GadgetT(pb, prefix),
        constants(_constants),
        // User state
        balanceBefore(pb, FMT(prefix, ".balanceBefore")),
        accountBefore(pb, FMT(prefix, ".accountBefore")),
        // Inputs
        accountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID")),
        tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")),
        amount(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amount")),
        publicKeyX(pb, 256, FMT(prefix, ".publicKeyX")),
        publicKeyY(pb, 256, FMT(prefix, ".publicKeyY")),
        // Calculate the new balance
        // We can't let the deposit fail (it's onchain so it needs to be included),
        // and we do want to cap the balance to NUM_BITS_AMOUNT bits max, so cap the balance even
        // if it means that the user loses some tokens (NUM_BITS_AMOUNT bits should be more than enough).
        uncappedBalanceAfter(pb, balanceBefore.balance, amount.packed, FMT(prefix, ".uncappedBalanceAfter")),
        balanceAfter(pb, uncappedBalanceAfter.result(), constants.maxAmount, NUM_BITS_AMOUNT + 1, FMT(prefix, ".balanceAfter")),
        // Update User
        updateBalance(pb, accountBefore.balancesRoot, tokenID.bits,
                      {balanceBefore.balance, balanceBefore.tradingHistory},
                      {balanceAfter.result(), balanceBefore.tradingHistory},
                      FMT(prefix, ".updateBalance")),
        updateAccount(pb, root, accountID.bits,
                      {accountBefore.publicKey.x, accountBefore.publicKey.y, accountBefore.nonce, accountBefore.balancesRoot},
                      {publicKeyX.packed, publicKeyY.packed, accountBefore.nonce, updateBalance.result()},
                      FMT(prefix, ".updateAccount"))
    {
    }

    // Assigns the witness values for one deposit (leaf state, public
    // inputs, the capped balance, and both Merkle-update proofs).
    void generate_r1cs_witness(const Deposit& deposit)
    {
        // User state
        balanceBefore.generate_r1cs_witness(deposit.balanceUpdate.before);
        accountBefore.generate_r1cs_witness(deposit.accountUpdate.before);
        // Inputs
        accountID.generate_r1cs_witness(pb, deposit.accountUpdate.accountID);
        tokenID.generate_r1cs_witness(pb, deposit.balanceUpdate.tokenID);
        amount.generate_r1cs_witness(pb, deposit.amount);
        publicKeyX.generate_r1cs_witness(pb, deposit.accountUpdate.after.publicKey.x);
        publicKeyY.generate_r1cs_witness(pb, deposit.accountUpdate.after.publicKey.y);
        // Calculate the new balance
        uncappedBalanceAfter.generate_r1cs_witness();
        balanceAfter.generate_r1cs_witness();
        // Update User
        updateBalance.generate_r1cs_witness(deposit.balanceUpdate.proof);
        updateAccount.generate_r1cs_witness(deposit.accountUpdate.proof);
    }

    // Adds the R1CS constraints for every sub-gadget.
    void generate_r1cs_constraints()
    {
        // Inputs
        accountID.generate_r1cs_constraints(true);
        tokenID.generate_r1cs_constraints(true);
        amount.generate_r1cs_constraints(true);
        publicKeyX.generate_r1cs_constraints(true);
        publicKeyY.generate_r1cs_constraints(true);
        // Calculate the new balance
        uncappedBalanceAfter.generate_r1cs_constraints();
        balanceAfter.generate_r1cs_constraints();
        // Update User
        updateBalance.generate_r1cs_constraints();
        updateAccount.generate_r1cs_constraints();
    }

    // Bit arrays that are hashed into the on-chain deposit block hash.
    // NOTE(review): the 6 zero bits presumably pad tokenID out to a byte
    // boundary for the on-chain encoding — confirm against the contract.
    const std::vector<VariableArrayT> getOnchainData() const
    {
        return {accountID.bits,
                publicKeyX.bits, publicKeyY.bits,
                VariableArrayT(6, constants.zero), tokenID.bits,
                amount.bits};
    }

    // Accounts Merkle root after this deposit has been applied.
    const VariableT& getNewAccountsRoot() const
    {
        return updateAccount.result();
    }
};
// Circuit for a block of deposits: chains one DepositGadget per deposit off
// the previous accounts root, threads a running SHA-256 hash over the
// on-chain data of every deposit, and exposes the block's public data.
class DepositCircuit : public Circuit
{
public:
    PublicDataGadget publicData;
    Constants constants;

    // Inputs
    DualVariableGadget exchangeID;
    DualVariableGadget merkleRootBefore;
    DualVariableGadget merkleRootAfter;
    DualVariableGadget depositBlockHashStart;
    DualVariableGadget startIndex;
    DualVariableGadget count;

    // Deposits
    unsigned int numDeposits;               // set by generateConstraints (= blockSize)
    std::vector<DepositGadget> deposits;    // one gadget per deposit, chained by root
    std::vector<sha256_many> hashers;       // running hash chain over on-chain data

    DepositCircuit(ProtoboardT& pb, const std::string& prefix) :
        Circuit(pb, prefix),
        publicData(pb, FMT(prefix, ".publicData")),
        constants(pb, FMT(prefix, ".constants")),
        // Inputs
        exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
        merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
        merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
        depositBlockHashStart(pb, 256, FMT(prefix, ".depositBlockHashStart")),
        startIndex(pb, 32, FMT(prefix, ".startIndex")),
        count(pb, 32, FMT(prefix, ".count"))
    {
    }

    // Builds the constraint system for `blockSize` deposits.
    void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
    {
        this->numDeposits = blockSize;

        constants.generate_r1cs_constraints();

        // Inputs
        exchangeID.generate_r1cs_constraints(true);
        merkleRootBefore.generate_r1cs_constraints(true);
        merkleRootAfter.generate_r1cs_constraints(true);
        depositBlockHashStart.generate_r1cs_constraints(true);
        startIndex.generate_r1cs_constraints(true);
        count.generate_r1cs_constraints(true);

        // Deposits
        // reserve() keeps references returned by deposits.back() /
        // hashers.back() valid while we emplace in the loop.
        deposits.reserve(numDeposits);
        hashers.reserve(numDeposits);
        for (size_t j = 0; j < numDeposits; j++)
        {
            // Deposit j starts from the root produced by deposit j-1.
            VariableT depositAccountsRoot = (j == 0) ? merkleRootBefore.packed : deposits.back().getNewAccountsRoot();
            deposits.emplace_back(
                pb,
                constants,
                depositAccountsRoot,
                std::string("deposit_") + std::to_string(j)
            );
            deposits.back().generate_r1cs_constraints();

            // Hash data from deposit: hash_j = SHA256(hash_{j-1} || onchainData_j)
            std::vector<VariableArrayT> depositData = deposits.back().getOnchainData();
            std::vector<VariableArrayT> hashBits;
            hashBits.push_back(reverse((j == 0) ? depositBlockHashStart.bits : hashers.back().result().bits));
            hashBits.insert(hashBits.end(), depositData.begin(), depositData.end());
            hashers.emplace_back(pb, flattenReverse(hashBits), std::string("hash_") + std::to_string(j));
            hashers.back().generate_r1cs_constraints();
        }

        // Public data
        publicData.add(exchangeID.bits);
        publicData.add(merkleRootBefore.bits);
        publicData.add(merkleRootAfter.bits);
        publicData.add(reverse(depositBlockHashStart.bits));
        publicData.add(reverse(hashers.back().result().bits));
        publicData.add(startIndex.bits);
        publicData.add(count.bits);
        publicData.generate_r1cs_constraints();

        // Check the new merkle root
        requireEqual(pb, deposits.back().getNewAccountsRoot(), merkleRootAfter.packed, "newMerkleRoot");
    }

    // Assigns witness values for a whole deposit block.
    bool generateWitness(const DepositBlock& block)
    {
        constants.generate_r1cs_witness();

        // Inputs
        exchangeID.generate_r1cs_witness(pb, block.exchangeID);
        merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
        merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
        depositBlockHashStart.generate_r1cs_witness(pb, block.startHash);
        startIndex.generate_r1cs_witness(pb, block.startIndex);
        count.generate_r1cs_witness(pb, block.count);
        // printBits("start hash input: 0x", depositBlockHashStart.get_bits(pb), true);

        // Deposits
        assert(deposits.size() == hashers.size());
        // NOTE(review): the parallel loop assumes each gadget writes only
        // its own protoboard variables — presumably safe by construction,
        // but verify if the protoboard is not thread-safe for writes.
#ifdef MULTICORE
        #pragma omp parallel for
#endif
        for(unsigned int i = 0; i < block.deposits.size(); i++)
        {
            deposits[i].generate_r1cs_witness(block.deposits[i]);
        }
        // Cannot be done in parallel: each hasher consumes the previous
        // hasher's result.
        for(unsigned int i = 0; i < block.deposits.size(); i++)
        {
            hashers[i].generate_r1cs_witness();
        }
        // printBits("DepositBlockHash: 0x", hashers.back().result().bits.get_bits(pb));

        // Public data
        publicData.generate_r1cs_witness();

        return true;
    }

    // JSON entry point: parses the block and delegates.
    bool generateWitness(const json& input) override
    {
        return generateWitness(input.get<Loopring::DepositBlock>());
    }

    BlockType getBlockType() override
    {
        return BlockType::Deposit;
    }

    unsigned int getBlockSize() override
    {
        return numDeposits;
    }

    void printInfo() override
    {
        std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numDeposits) << "/deposit)" << std::endl;
    }
};
}
#endif
|
mainp.c | #include <stdio.h>
#include <omp.h>
/* Sums the integers 0..n-1 in parallel and returns the total.
 *
 * The original pragma listed `private(i)`, but `i` is declared in the
 * for-init statement and does not exist at the pragma's scope, which is
 * an error under OpenMP; the loop variable of a `parallel for` is
 * implicitly private anyway, so the clause is simply dropped. */
int simpleLoop(int n)
{
    int sum = 0;
#pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < n; i++)
    {
        sum += i;
    }
    return sum;
}
/* Entry point: exercises simpleLoop with n = 10 and prints the sum. */
int main()
{
    const int total = simpleLoop(10);
    printf("%d\n", total);
    return 0;
}
|
kmeans_h2o4gpu.h | /*!
* Copyright 2017 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#pragma once
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include "kmeans_labels.h"
#include "kmeans_centroids.h"
// Device functor executed once per data row (`idx`): scans the k pairwise
// distances for that row — stored column-major, i.e. strided by
// `rows_per_run` — picks the nearest centroid, and atomically increments
// that centroid's assignment counter.
template<typename T>
struct count_functor {
  T* pairwise_distances_ptr;   // rows_per_run x k distance matrix (column-major)
  int* counts_ptr;             // k per-centroid counters, accumulated atomically
  int k;                       // number of centroids
  int rows_per_run;            // leading dimension of the distance matrix
  count_functor(T* _pairwise_distances_ptr, int* _counts_ptr, int _k, int _rows_per_run) {
    pairwise_distances_ptr = _pairwise_distances_ptr;
    counts_ptr = _counts_ptr;
    k = _k;
    rows_per_run = _rows_per_run;
  }
  __device__
  void operator()(int idx) const {
    int closest_centroid_idx = 0;
    T best_distance = pairwise_distances_ptr[idx];
    // FIXME potentially slow due to striding
    for (int i = 1; i < k; i++) {
      T distance = pairwise_distances_ptr[idx + i * rows_per_run];
      if (distance < best_distance) {
        best_distance = distance;
        closest_centroid_idx = i;
      }
    }
    // Multiple threads may pick the same centroid concurrently.
    atomicAdd(&counts_ptr[closest_centroid_idx], 1);
  }
};
/**
 * Calculates the closest centroid for each record and counts how many
 * points are assigned to each centroid, accumulated across all GPUs.
 * @tparam T
 * @param verbose
 * @param num_gpu number of devices; one OpenMP thread drives each device
 * @param rows_per_gpu number of data rows resident on each device
 * @param cols number of columns (features) per row
 * @param data per-device data matrices
 * @param data_dots per-device precomputed row dot products
 * @param centroids host-side centroid matrix (k x cols, row-major)
 * @param weights output: per-centroid point counts, summed over devices
 */
template<typename T>
void count_pts_per_centroid(
    int verbose,
    int num_gpu, int rows_per_gpu, int cols,
    thrust::device_vector<T> **data,
    thrust::device_vector<T> **data_dots,
    thrust::host_vector<T> centroids,
    thrust::host_vector<T> &weights
) {
  int k = centroids.size() / cols;

  #pragma omp parallel for
  for (int i = 0; i < num_gpu; i++) {
    thrust::host_vector<int> weights_tmp(weights.size());
    CUDACHECK(cudaSetDevice(i));
    thrust::device_vector<T> centroid_dots(k);
    thrust::device_vector<T> d_centroids = centroids;
    thrust::device_vector<int> counts(k);

    kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, k,
      *data[i], d_centroids, *data_dots[i], centroid_dots,
      [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) {
        auto counting = thrust::make_counting_iterator(0);
        auto counts_ptr = thrust::raw_pointer_cast(counts.data());
        auto pairwise_distances_ptr = thrust::raw_pointer_cast(pairwise_distances.data());
        thrust::for_each(counting,
                         counting + rows_per_run,
                         count_functor<T>(pairwise_distances_ptr, counts_ptr, k, rows_per_run)
        );
      }
    );

    kmeans::detail::memcpy(weights_tmp, counts);
    kmeans::detail::streamsync(i);

    // `weights` is shared by all device threads and every thread writes
    // the same k entries; serialize the accumulation to avoid a data race
    // (previously this ran unguarded inside the parallel region).
    #pragma omp critical
    {
      for (int p = 0; p < k; p++) {
        weights[p] += weights_tmp[p];
      }
    }
  }
}
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place so that the microsecond difference comes out
 * non-negative; callers must not rely on Y's value afterwards.
 *
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into Y's usec field when X's usec is smaller. */
    if (x->tv_usec < y->tv_usec)
    {
        const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (more than one second) into Y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        const int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization, tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference exactly when X's (adjusted) seconds are smaller. */
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t3+Nx,1024),floord(Nt+Nx-4,1024)),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(1024*t4,t5+1);
ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
demosaicframes.c | /*! @file demosaicframes.c
* @brief CFA Bayer tools
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under either:
* - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
* - MIT license, http://opensource.org/licenses/MIT
* at your option.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#define DEMOSAICFRAMESLIB_EXPORTS
#define __STDC_LIMIT_MACROS
#include <stdint.h>
#include <math.h>
#include <assert.h>
#include <emmintrin.h> // SSE2 intrinsics
#ifdef _WIN32
#include <windows.h>
#elif __APPLE__
#include <CoreFoundation/CoreFoundation.h> // for propertylist/preferences
#else
#include <xmmintrin.h>
#endif
#include "codec.h"
#include "swap.h"
#include "config.h"
#include "encoder.h"
#include "color.h"
#include "metadata.h"
#include "convert.h"
#include "lutpath.h"
#include "demosaicframes.h"
typedef int DEBAYER_ORDERING;
#define BAYER_FORMAT_RED_GRN 0
#define BAYER_FORMAT_GRN_RED 1
#define BAYER_FORMAT_GRN_BLU 2
#define BAYER_FORMAT_BLU_GRN 3
#ifdef __cplusplus
extern "C" {
#endif
// Forward references
uint32_t gencrc(unsigned char *buf, int len);
void GetCurrentID(DECODER *decoder, unsigned char *ptr, unsigned int len, char *id, unsigned int id_size);
void UpdateCFHDDATA(DECODER *decoder, unsigned char *ptr, int len, int delta, int priority);
void DebayerLine(int width, int height, int linenum,
unsigned short *bayer_source,
DEBAYER_ORDERING order,
unsigned short *RGB_output,
int highquality,
int sharpening);
void ColorDifference2Bayer(int width,
unsigned short *srcptr,
int bayer_pitch,
int bayer_format);
void BayerRippleFilter(int width,
unsigned short *srcptr,
int bayer_pitch,
int bayer_formatm,
unsigned short *srcbase);
void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height,
int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#ifdef __cplusplus
}
#endif
#define MAKEID(a,b,c,d) ((a<<24)|(b<<16)|(c<<8)|(d))
#define MAKEID_SWAP(d,c,b,a) ((a<<24)|(b<<16)|(c<<8)|(d))
#define T_VALUE 15*256
#define SATURATE16(x) (((x) > 0) ? (((x) <= 65535) ? (x) : 65535) : 0)
#define DEBAYER5x5 1
#define CF_ENHANCE 1 //CineForm Enhancement Debayer
/* REDCELL — demosaic one Bayer site whose center sample is red.
 * Writes one interpolated 16-bit RGB triple to 'rgbptr' from the 5x5
 * neighborhood around 'bayerptr'; 'width' is the row pitch in samples.
 * Red is the raw center sample (already 0..65535, so no clamp); green and
 * blue are reconstructed with 5x5 FIR kernels and clamped via SATURATE16. */
void REDCELL(unsigned short *rgbptr, unsigned short *bayerptr, int width)
{
    int r, g, b;
    {
        /* normal 5x5 */
#if CF_ENHANCE
        /* Edge-adaptive variant: gradient estimates along the horizontal
           (diffR), the green cross (diffG) and the diagonal (diffB) drive
           the center-tap weights factorR/factorB, so interpolation leans
           on the direction with the smaller difference. The >> 10 scales
           16-bit differences down to small integers before squaring. */
        int diffR = abs((int)bayerptr[-2] - (int)bayerptr[2]) >> 10;
        int diffG = abs((int)bayerptr[-1] - (int)bayerptr[1]) >> 10;
        int diffB = abs((int)bayerptr[-1 * width - 1] - (int)bayerptr[1 * width + 1]) >> 10;
        int factorR = (2 + (2 * diffR * diffR / (2 + diffG * diffG)));
        int factorB = (4 + (4 * diffG * diffG / (2 + diffB * diffB)));
        r = bayerptr[0]; //r
        g = ( bayerptr[-2 * width] * -1
              + bayerptr[-1 * width] * factorR
              + bayerptr[-2] * -1 + bayerptr[-1] * factorR + bayerptr[0] * 4 + bayerptr[1] * factorR + bayerptr[2] * -1
              + bayerptr[+1 * width] * factorR
              + bayerptr[+2 * width] * -1 ) / (4 * factorR); //g
        b = ( bayerptr[-2 * width] * -3
              + bayerptr[-1 * width - 1] * factorB + bayerptr[-1 * width + 1] * factorB
              + bayerptr[-2] * -3 + bayerptr[0] * 12 + bayerptr[2] * -3
              + bayerptr[+1 * width - 1] * factorB + bayerptr[+1 * width + 1] * factorB
              + bayerptr[+2 * width] * -3 ) / (4 * factorB); //b
#else
        /* Fixed-coefficient 5x5 kernels (no edge adaptation). */
        r = bayerptr[0]; //r
        g = ( bayerptr[-2 * width] * -1
              + bayerptr[-1 * width] * 2
              + bayerptr[-2] * -1 + bayerptr[-1] * 2 + bayerptr[0] * 4 + bayerptr[1] * 2 + bayerptr[2] * -1
              + bayerptr[+1 * width] * 2
              + bayerptr[+2 * width] * -1 ) >> 3; //g
        b = ( bayerptr[-2 * width] * -3
              + bayerptr[-1 * width - 1] * 4 + bayerptr[-1 * width + 1] * 4
              + bayerptr[-2] * -3 + bayerptr[0] * 12 + bayerptr[2] * -3
              + bayerptr[+1 * width - 1] * 4 + bayerptr[+1 * width + 1] * 4
              + bayerptr[+2 * width] * -3 ) >> 4; //b
#endif
    }
    *rgbptr++ = (r);            /* raw sensor sample, already in range */
    *rgbptr++ = SATURATE16(g);
    *rgbptr++ = SATURATE16(b);
}
/* GRNREDCELL — demosaic one Bayer site whose center sample is green on a
 * red row (red neighbors left/right, blue above/below — inferred from the
 * kernel layout; matches the GRN/RED naming). Writes one 16-bit RGB triple
 * to 'rgbptr' from the 5x5 neighborhood around 'bayerptr'; 'width' is the
 * row pitch in samples. Green is the raw sample; red/blue are clamped. */
void GRNREDCELL(unsigned short *rgbptr, unsigned short *bayerptr, int width)
{
    int r, g, b;
    {
        /* normal 5x5 */
#if CF_ENHANCE
        /* Edge-adaptive variant: see REDCELL — gradient estimates boost the
           weights of the red (horizontal) and blue (vertical) neighbors. */
        int diffR = abs((int)bayerptr[-1] - (int)bayerptr[1]) >> 10;
        int diffG = abs((int)bayerptr[-2] - (int)bayerptr[2]) >> 10;
        int diffB = abs((int)bayerptr[-1 * width] - (int)bayerptr[1 * width]) >> 10;
        int factorR = (8 + (4 * diffG * diffG / (2 + diffR * diffR)));
        int factorB = (8 + (4 * diffG * diffG / (2 + diffB * diffB)));
        r = ( bayerptr[-2 * width] * 1
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * -2 + bayerptr[-1] * factorR + bayerptr[0] * 10 + bayerptr[1] * factorR + bayerptr[2] * -2
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * 1 ) / (factorR * 2); //r
        g = bayerptr[0]; //g
        b = ( bayerptr[-2 * width] * -2
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width] * factorB + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * 1 + bayerptr[0] * 10 + bayerptr[2] * 1
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width] * factorB + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * -2 ) / (factorB * 2); //b
#else
        /* Fixed-coefficient 5x5 kernels (no edge adaptation). */
        r = (( bayerptr[-2 * width] * 1
               + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width + 1] * -2
               + bayerptr[-2] * -2 + bayerptr[-1] * 8 + bayerptr[0] * 10 + bayerptr[1] * 8 + bayerptr[2] * -2
               + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width + 1] * -2
               + bayerptr[+2 * width] * 1 ) >> 4); //r
        g = bayerptr[0]; //g
        b = ( bayerptr[-2 * width] * -2
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width] * 8 + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * 1 + bayerptr[0] * 10 + bayerptr[2] * 1
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width] * 8 + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * -2 ) >> 4; //b
#endif // CF_ENHANCE
    }
    *rgbptr++ = SATURATE16(r);
    *rgbptr++ = (g);            /* raw sensor sample, already in range */
    *rgbptr++ = SATURATE16(b);
}
/* GRNBLUCELL — demosaic one Bayer site whose center sample is green on a
 * blue row (blue neighbors left/right, red above/below — the mirror of
 * GRNREDCELL, inferred from the kernel layout). Writes one 16-bit RGB
 * triple to 'rgbptr' from the 5x5 neighborhood around 'bayerptr'; 'width'
 * is the row pitch in samples. Green is the raw sample; red/blue clamped. */
void GRNBLUCELL(unsigned short *rgbptr, unsigned short *bayerptr, int width)
{
    int r, g, b;
    {
        /* normal 5x5 */
#if CF_ENHANCE
        /* Edge-adaptive variant: see REDCELL — gradient estimates boost the
           weights of the red (vertical) and blue (horizontal) neighbors. */
        int diffR = abs((int)bayerptr[-1 * width] - (int)bayerptr[1 * width]) >> 10;
        int diffG = abs((int)bayerptr[-2 * width] - (int)bayerptr[2 * width]) >> 10;
        int diffB = abs((int)bayerptr[-1] - (int)bayerptr[1]) >> 10;
        int factorR = (8 + (4 * diffG * diffG / (2 + diffR * diffR)));
        int factorB = (8 + (4 * diffG * diffG / (2 + diffB * diffB)));
        r = ( bayerptr[-2 * width] * -2
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width] * factorR + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * 1 + bayerptr[0] * 10 + bayerptr[2] * 1
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width] * factorR + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * -2 ) / (factorR * 2); //r
        g = bayerptr[0]; //g
        b = ( bayerptr[-2 * width] * 1
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * -2 + bayerptr[-1] * factorB + bayerptr[0] * 10 + bayerptr[1] * factorB + bayerptr[2] * -2
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * 1 ) / (factorB * 2); //b
#else
        /* Fixed-coefficient 5x5 kernels (no edge adaptation). */
        r = (( bayerptr[-2 * width] * -2
               + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width] * 8 + bayerptr[-1 * width + 1] * -2
               + bayerptr[-2] * 1 + bayerptr[0] * 10 + bayerptr[2] * 1
               + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width] * 8 + bayerptr[+1 * width + 1] * -2
               + bayerptr[+2 * width] * -2 ) >> 4);
        g = bayerptr[0]; //g
        b = ( bayerptr[-2 * width] * 1
              + bayerptr[-1 * width - 1] * -2 + bayerptr[-1 * width + 1] * -2
              + bayerptr[-2] * -2 + bayerptr[-1] * 8 + bayerptr[0] * 10 + bayerptr[1] * 8 + bayerptr[2] * -2
              + bayerptr[+1 * width - 1] * -2 + bayerptr[+1 * width + 1] * -2
              + bayerptr[+2 * width] * 1 ) >> 4; //b
#endif // CF_ENHANCE
    }
    *rgbptr++ = SATURATE16(r);
    *rgbptr++ = (g);            /* raw sensor sample, already in range */
    *rgbptr++ = SATURATE16(b);
}
/* BLUCELL — demosaic one Bayer site whose center sample is blue (red lies
 * on the diagonals, green on the cross — inferred from the kernel layout;
 * the mirror of REDCELL). Writes one 16-bit RGB triple to 'rgbptr' from
 * the 5x5 neighborhood around 'bayerptr'; 'width' is the row pitch in
 * samples. Blue is the raw sample; red/green are clamped via SATURATE16. */
void BLUCELL(unsigned short *rgbptr, unsigned short *bayerptr, int width)
{
    int r, g, b;
    {
        /* normal 5x5 */
#if CF_ENHANCE
        /* Edge-adaptive variant: see REDCELL — gradient estimates drive the
           center-tap weights of the diagonal (red) and cross (green) taps. */
        int diffR = abs((int)bayerptr[-1 * width - 1] - (int)bayerptr[+1 * width + 1]) >> 10;
        int diffG = abs((int)bayerptr[-1] - (int)bayerptr[1]) >> 10;
        int diffB = abs((int)bayerptr[-2] - (int)bayerptr[2]) >> 10;
        int factorR = (4 + (4 * diffG * diffG / (2 + diffR * diffR)));
        int factorB = (2 + (2 * diffB * diffB / (2 + diffG * diffG)));
        r = ( bayerptr[-2 * width] * -3
              + bayerptr[-1 * width - 1] * factorR + bayerptr[-1 * width + 1] * factorR
              + bayerptr[-2] * -3 + bayerptr[0] * 12 + bayerptr[2] * -3
              + bayerptr[+1 * width - 1] * factorR + bayerptr[+1 * width + 1] * factorR
              + bayerptr[+2 * width] * -3 ) / (factorR * 4);
        g = ( bayerptr[-2 * width] * -1
              + bayerptr[-1 * width] * factorB
              + bayerptr[-2] * -1 + bayerptr[-1] * factorB + bayerptr[0] * 4 + bayerptr[1] * factorB + bayerptr[2] * -1
              + bayerptr[+1 * width] * factorB
              + bayerptr[+2 * width] * -1 ) / (factorB * 4); //g
        b = bayerptr[0]; //b
#else
        /* Fixed-coefficient 5x5 kernels (no edge adaptation). */
        r = (( bayerptr[-2 * width] * -3
               + bayerptr[-1 * width - 1] * 4 + bayerptr[-1 * width + 1] * 4
               + bayerptr[-2] * -3 + bayerptr[0] * 12 + bayerptr[2] * -3
               + bayerptr[+1 * width - 1] * 4 + bayerptr[+1 * width + 1] * 4
               + bayerptr[+2 * width] * -3 ) >> 4);
        g = ( bayerptr[-2 * width] * -1
              + bayerptr[-1 * width] * 2
              + bayerptr[-2] * -1 + bayerptr[-1] * 2 + bayerptr[0] * 4 + bayerptr[1] * 2 + bayerptr[2] * -1
              + bayerptr[+1 * width] * 2
              + bayerptr[+2 * width] * -1 ) >> 3; //g
        b = bayerptr[0]; //b
#endif // CF_ENHANCE
    }
    *rgbptr++ = SATURATE16(r);
    *rgbptr++ = SATURATE16(g);
    *rgbptr++ = (b);            /* raw sensor sample, already in range */
}
/* FastSharpeningBlurHinplace — in-place horizontal sharpen of one packed
 * RGB scanline (unsigned 16-bit, 3 components per pixel). 'width' is in
 * pixels; 'sharpness' 1..3 selects progressively stronger symmetric 5-tap
 * kernels (-1, B, C, B, -1) >> shift (each sums to 2^shift so overall gain
 * is 1). The filtered row is first written two pixels early (overwriting
 * the pixels already consumed by 'sptr'), then shifted back into place at
 * the end; the two left-edge pixels are saved up front and restored last.
 * Edge pixels get a 1,2,1 blur or a straight copy instead of the kernel. */
void FastSharpeningBlurHinplace(int width, unsigned short *sptr, int sharpness)
{
    int i = 0, shift = 2, B, C;
    uint16_t *outptr = sptr;
    int rneg1, rneg2;
    int gneg1, gneg2;
    int bneg1, bneg2;
    //	*outptr++ = *sptr++; //R
    //	*outptr++ = *sptr++; //G
    //	*outptr++ = *sptr++; //B
    /* Save pixel 0 unfiltered; it is restored at the very end. */
    rneg2 = *sptr++; //R
    gneg2 = *sptr++; //G
    bneg2 = *sptr++; //B
    // blur 1,2,1
    //	*outptr++ = (sptr[-3] + sptr[0]*2 + sptr[3])>>2;	sptr++;	//R
    //	*outptr++ = (sptr[-3] + sptr[0]*2 + sptr[3])>>2;	sptr++;	//G
    //	*outptr++ = (sptr[-3] + sptr[0]*2 + sptr[3])>>2;	sptr++;	//B
    /* Save pixel 1 as a 1,2,1 blur; also restored at the end. */
    rneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //R
    gneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //G
    bneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //B
    /* Kernel strength: larger 'sharpness' -> smaller shift / center weight
       ratio, i.e. stronger high-frequency boost. Outer taps are fixed -1. */
    switch (sharpness)
    {
        default:
        case 3: //highest sharpen
            shift = 2;
            B = 1;
            C = 4;
            break;
        case 2: //nice sharpen
            shift = 3;
            B = 2;
            C = 6;
            break;
        case 1: //small sharpen
            shift = 4;
            B = 4;
            C = 10;
            break;
    }
    /* Main pass: pixel i is filtered from i-2..i+2 but stored at i-2
       (outptr trails sptr by two pixels), so unfiltered source data is
       never overwritten before it is read. */
    for (i = 2; i < width - 2; i++)
    {
        *outptr++ = SATURATE16((-sptr[-6] + sptr[-3] * B + sptr[0] * C + sptr[3] * B - sptr[6]) >> shift);
        sptr++; //R
        *outptr++ = SATURATE16((-sptr[-6] + sptr[-3] * B + sptr[0] * C + sptr[3] * B - sptr[6]) >> shift);
        sptr++; //G
        *outptr++ = SATURATE16((-sptr[-6] + sptr[-3] * B + sptr[0] * C + sptr[3] * B - sptr[6]) >> shift);
        sptr++; //B
    }
    // blur 1,2,1
    /* Right edge: next-to-last pixel blurred (cannot overflow: inputs are
       16-bit unsigned, so no SATURATE16 needed), last pixel copied. */
    *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //R
    *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //G
    *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
    sptr++; //B
    *outptr++ = *sptr++; //R
    *outptr++ = *sptr++; //G
    *outptr++ = *sptr++; //B
    /* Fix-up pass: everything was written two pixels early; walk backwards
       shifting each triple up by two pixels (6 shorts). */
    outptr += 5;
    for (i = 2; i < width; i++)
    {
        //TODO: The GCC compiler warns that the operation on outptr may be undefined
        //		*outptr-- = outptr[-6];
        //		*outptr-- = outptr[-6];
        //		*outptr-- = outptr[-6];
        outptr[0] = outptr[-6];
        outptr[-1] = outptr[-7];
        outptr[-2] = outptr[-8];
        outptr -= 3;
    }
    /* Restore the saved left-edge pixels (pixel 1 blurred, pixel 0 copy). */
    *outptr-- = bneg1;
    *outptr-- = gneg1;
    *outptr-- = rneg1;
    *outptr-- = bneg2;
    *outptr-- = gneg2;
    *outptr-- = rneg2;
}
/* VignetteScaleWP13 — scale one signed WP13 component by sqrt(Af),
 * preserving sign (computed as sqrt(v*v*Af), truncated toward zero,
 * exactly as the original inline stanzas did). */
static int16_t VignetteScaleWP13(int16_t v, float Af)
{
    if (v >= 0)
        return (int16_t)(int)sqrtf((float)v * (float)v * Af);
    return (int16_t)(-(int)sqrtf((float)v * (float)v * Af));
}

/* FastVignetteInplaceWP13 — apply a radial gain falloff (vignette) in place
 * to one scanline of signed 16-bit RGB (only pixelsize == 6, i.e. three
 * int16 components per pixel, is handled; other sizes are left untouched).
 *
 *   decoder      unused here (kept for signature compatibility)
 *   displayWidth pixels in the scanline
 *   width/height frame dimensions used to normalize the radius
 *   y            scanline index (radius measured from frame center)
 *   r1, r2       inner/outer falloff radii (squared internally)
 *   gain         gain applied outside r2; blended between r1 and r2
 *   sptr         scanline, modified in place
 *   resolution   unused here (kept for signature compatibility)
 *
 * The line is processed symmetrically from both ends toward the center;
 * once a pixel lands inside r1 (no falloff) the loop stops, since every
 * remaining pixel is closer to the center.
 * NOTE(review): ypos is normalized by 'width' (not 'height') — presumably
 * intentional to keep the radius circular in pixel space; confirm. */
void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
                             int16_t *sptr, int resolution, int pixelsize)
{
    int16_t *outptr = sptr;                                      /* walks right from the left edge */
    int16_t *outpt2 = sptr + (displayWidth - 1) * pixelsize / 2; /* walks left from the right edge */
    int x, ch;
    float xstep = 2.0f / (float)(displayWidth);
    float xpos = -1.0f; // far left
    float ypos = 2.0f * (float)(height / 2 - y) / (float)width;
    ypos *= ypos;
    /* Work with squared radii so the per-pixel test avoids sqrt. */
    r1 *= r1;
    r2 *= r2;
    if (pixelsize == 6)
    {
        for (x = 0; x < displayWidth / 2; x++)
        {
            float r = xpos * xpos + ypos;
            if (r2 < r)
            {
                /* Fully outside the outer radius: hard gain. */
                if (gain == 0.0)
                {
                    for (ch = 0; ch < 3; ch++)
                    {
                        outptr[ch] = 0;
                        outpt2[ch] = 0;
                    }
                }
                else
                {
                    for (ch = 0; ch < 3; ch++)
                    {
                        outptr[ch] = VignetteScaleWP13(outptr[ch], gain);
                        outpt2[ch] = VignetteScaleWP13(outpt2[ch], gain);
                    }
                }
            }
            else if (r1 < r)
            {
                /* Transition band: soft-knee blend (x/(1+|x|) sigmoid)
                   between unity at r1 and 'gain' at r2. */
                float Af = (r2 - r) / (r2 - r1);
                Af -= 0.5f;
                Af *= 2.0f;
                Af /= (1.0f + fabsf(Af));
                Af += 0.5f;
                Af *= (1.0f - gain);
                Af += gain;
                for (ch = 0; ch < 3; ch++)
                {
                    outptr[ch] = VignetteScaleWP13(outptr[ch], Af);
                    outpt2[ch] = VignetteScaleWP13(outpt2[ch], Af);
                }
            }
            else
            {
                /* Inside r1: no falloff here or anywhere closer in. */
                break;
            }
            outptr += 3;
            outpt2 -= 3;
            xpos += xstep;
        }
    }
    else
    {
        /* Other pixel formats are intentionally left unmodified. */
    }
}
/* FastSharpeningBlurHinplaceWP13 — in-place horizontal sharpen (sharpness
 * >= 0) or blur (sharpness < 0) of one scanline of signed 16-bit (WP13)
 * pixels. 'width' is in pixels; 'pixelsize' selects packed RGB (6 bytes,
 * stride 3 shorts) vs RGBA (8 bytes, stride 4 shorts; alpha is copied
 * through unfiltered). 'resolution' picks the filter length (taps);
 * fractional 'sharpness' is split into integer kernel index + 8-bit blend
 * weights diff/adiff that interpolate between adjacent kernel strengths.
 * All kernels are symmetric and sum to 2^shift (unity gain). Like the
 * unsigned variant above, output is written a few pixels early and shifted
 * back into place afterwards, with edge pixels handled by blur/copy. */
void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize)
{
    int i = 0, shift = 2, A, B, C, taps = 1;
    int16_t *outptr = sptr;
    int rneg1, rneg2;
    int gneg1, gneg2;
    int bneg1, bneg2;
    int aneg1, aneg2;
    int diff;
    int adiff;
    /* Filter length scales with decode resolution: 5 taps at full size,
       3 at half, 1 (mostly a no-op) at quarter. */
    switch (resolution)
    {
        case DECODED_RESOLUTION_FULL:
        case DECODED_RESOLUTION_FULL_DEBAYER:
        case DECODED_RESOLUTION_HALF_VERTICAL:
            taps = 5;
            break;
        case DECODED_RESOLUTION_HALF:
        case DECODED_RESOLUTION_HALF_NODEBAYER:
        case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        case DECODED_RESOLUTION_HALF_HORIZONTAL:
            taps = 3;
            break;
        case DECODED_RESOLUTION_QUARTER:
        case DECODED_RESOLUTION_LOWPASS_ONLY:
        case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
            taps = 1;
            break;
    }
    if (sharpness < 0.0)
    {
        /* Blur path. diff = fractional part of (-sharpness*4) scaled to
           0..255; blends between adjacent blur kernel strengths. */
        diff = (int)(256.0f * (-sharpness * 4.0f - (float)((int)(-sharpness * 4.0f))));
        adiff = 256 - diff;
        if (pixelsize == 6)
        {
            if (taps == 5)
            {
                switch (-1 + (int)(sharpness * 4.0))
                {
                    case -5://highest blur
                        diff = 256;
                        /* fall through: -5 reuses the -4 (9-tap) kernel at full blur weight */
                    case -4: //blur
                        /* 9-tap near-box blur: A on the +/-4 taps, B on the
                           middle seven; pre-advance sptr 4 pixels so output
                           trails the read position. */
                        sptr += 4 * 3;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 4096 / 9 * diff / 256;
                        B = (4096 - A * 2) / 7;
                        for (i = 4; i < width - 4; i++)
                        {
                            outptr[0] = (((sptr[-12] + sptr[12]) * A + (sptr[-9] + sptr[-6] + sptr[-3] + sptr[0] + sptr[3] + sptr[6] + sptr[9]) * B) >> shift); //R
                            outptr[1] = (((sptr[-11] + sptr[13]) * A + (sptr[-8] + sptr[-5] + sptr[-2] + sptr[1] + sptr[4] + sptr[7] + sptr[10]) * B) >> shift); //G
                            outptr[2] = (((sptr[-10] + sptr[14]) * A + (sptr[-7] + sptr[-4] + sptr[-1] + sptr[2] + sptr[5] + sptr[8] + sptr[11]) * B) >> shift); //B
                            sptr += 3;
                            outptr += 3;
                        }
                        /* Output was written 4 pixels early: walk backwards
                           shifting triples into place, replicating with
                           progressively closer neighbors near the left edge. */
                        for (i = 4; i < width - 8; i++)
                        {
                            outptr -= 3;
                            outptr[0] = outptr[-12];
                            outptr[1] = outptr[-11];
                            outptr[2] = outptr[-10];
                        }
                        outptr -= 3;
                        outptr[0] = outptr[-9];
                        outptr[1] = outptr[-8];
                        outptr[2] = outptr[-7];
                        outptr -= 3;
                        outptr[0] = outptr[-6];
                        outptr[1] = outptr[-5];
                        outptr[2] = outptr[-4];
                        outptr -= 3;
                        outptr[0] = outptr[-3];
                        outptr[1] = outptr[-2];
                        outptr[2] = outptr[-1];
                        break;
                    case -3: //blur
                        /* 7-tap blur, same scheme with a 3-pixel pre-advance. */
                        sptr += 3 * 3;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 4096 / 7 * diff / 256;
                        B = (4096 - A * 2) / 5;
                        for (i = 3; i < width - 3; i++)
                        {
                            outptr[0] = (((sptr[-9] + sptr[9]) * A + (sptr[-6] + sptr[-3] + sptr[0] + sptr[3] + sptr[6]) * B ) >> shift); //R
                            outptr[1] = (((sptr[-8] + sptr[10]) * A + (sptr[-5] + sptr[-2] + sptr[1] + sptr[4] + sptr[7]) * B ) >> shift); //G
                            outptr[2] = (((sptr[-7] + sptr[11]) * A + (sptr[-4] + sptr[-1] + sptr[2] + sptr[5] + sptr[8]) * B ) >> shift); //B
                            sptr += 3;
                            outptr += 3;
                        }
                        for (i = 3; i < width - 6; i++)
                        {
                            outptr -= 3;
                            outptr[0] = outptr[-9];
                            outptr[1] = outptr[-8];
                            outptr[2] = outptr[-7];
                        }
                        outptr -= 3;
                        outptr[0] = outptr[-6];
                        outptr[1] = outptr[-5];
                        outptr[2] = outptr[-4];
                        outptr -= 3;
                        outptr[0] = outptr[-3];
                        outptr[1] = outptr[-2];
                        outptr[2] = outptr[-1];
                        break;
                    case -2: //blur
                        /* 5-tap blur blended between box and tent shapes. */
                        sptr += 2 * 3;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 0 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 8 * adiff + 4 * diff;
                        for (i = 2; i < width - 2; i++)
                        {
                            outptr[0] = ((sptr[-6] * A + sptr[-3] * B + sptr[0] * C + sptr[3] * B + sptr[6] * A) >> shift); //R
                            outptr[1] = ((sptr[-5] * A + sptr[-2] * B + sptr[1] * C + sptr[4] * B + sptr[7] * A) >> shift); //G
                            outptr[2] = ((sptr[-4] * A + sptr[-1] * B + sptr[2] * C + sptr[5] * B + sptr[8] * A) >> shift); //B
                            sptr += 3;
                            outptr += 3;
                        }
                        for (i = 2; i < width - 4; i++)
                        {
                            outptr -= 3;
                            outptr[0] = outptr[-6];
                            outptr[1] = outptr[-5];
                            outptr[2] = outptr[-4];
                        }
                        outptr -= 3;
                        outptr[0] = outptr[-3];
                        outptr[1] = outptr[-2];
                        outptr[2] = outptr[-1];
                        break;
                    case -1: //small blur
                        /* Blend between identity (adiff) and a 1,2,1-ish tent. */
                        sptr += 2 * 3;
                        shift = 4 + 8; //A = 0; B = 1; C = 14   0,2,12,2,0
                        A = 0 * adiff + 0 * diff;
                        B = 0 * adiff + 4 * diff;
                        C = 16 * adiff + 8 * diff;
                        for (i = 2; i < width - 2; i++)
                        {
                            outptr[0] = ((sptr[-6] * A + sptr[-3] * B + sptr[0] * C + sptr[3] * B + sptr[6] * A) >> shift); //R
                            outptr[1] = ((sptr[-5] * A + sptr[-2] * B + sptr[1] * C + sptr[4] * B + sptr[7] * A) >> shift); //G
                            outptr[2] = ((sptr[-4] * A + sptr[-1] * B + sptr[2] * C + sptr[5] * B + sptr[8] * A) >> shift); //B
                            sptr += 3;
                            outptr += 3;
                        }
                        for (i = 2; i < width - 4; i++)
                        {
                            outptr -= 3;
                            outptr[0] = outptr[-6];
                            outptr[1] = outptr[-5];
                            outptr[2] = outptr[-4];
                        }
                        outptr -= 3;
                        outptr[0] = outptr[-3];
                        outptr[1] = outptr[-2];
                        outptr[2] = outptr[-1];
                        break;
                }
            }
            else if (taps == 3)
            {
                /* Half-resolution blur: single 5-tap kernel, blend weight
                   recomputed from the raw sharpness fraction. */
                diff = (int)(256.0f * (-sharpness - (float)((int)(-sharpness * 0.999f))));
                adiff = 256 - diff;
                {
                    sptr += 2 * 3;
                    shift = 4 + 8; //A = 0; B = 1; C = 14   0,2,12,2,0
                    A = 0 * adiff + 2 * diff;
                    B = 0 * adiff + 4 * diff;
                    C = 16 * adiff + 4 * diff;
                    for (i = 2; i < width - 2; i++)
                    {
                        outptr[0] = ((sptr[-6] * A + sptr[-3] * B + sptr[0] * C + sptr[3] * B + sptr[6] * A) >> shift); //R
                        outptr[1] = ((sptr[-5] * A + sptr[-2] * B + sptr[1] * C + sptr[4] * B + sptr[7] * A) >> shift); //G
                        outptr[2] = ((sptr[-4] * A + sptr[-1] * B + sptr[2] * C + sptr[5] * B + sptr[8] * A) >> shift); //B
                        sptr += 3;
                        outptr += 3;
                    }
                    for (i = 2; i < width - 4; i++)
                    {
                        outptr -= 3;
                        outptr[0] = outptr[-6];
                        outptr[1] = outptr[-5];
                        outptr[2] = outptr[-4];
                    }
                    outptr -= 3;
                    outptr[0] = outptr[-3];
                    outptr[1] = outptr[-2];
                    outptr[2] = outptr[-1];
                }
            }
            /* taps == 1: no blur applied at quarter resolution. */
        }
        else
        {
            /* RGBA (stride 4 shorts): same kernels, alpha copied through. */
            if (taps == 5)
            {
                /* NOTE(review): this switch indexes with (int)(sharpness*5.0)
                   while the pixelsize==6 path above uses -1+(int)(sharpness*4.0)
                   — looks inconsistent; confirm which mapping is intended. */
                switch ((int)(sharpness * 5.0))
                {
                    case -5://highest blur
                        diff = 256;
                        /* fall through: -5 reuses the -4 (9-tap) kernel at full blur weight */
                    case -4: //blur
                        sptr += 4 * 4;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 4096 / 9 * diff / 256;
                        B = (4096 - A * 2) / 7;
                        for (i = 4; i < width - 4; i++)
                        {
                            outptr[0] = (((sptr[-16] + sptr[16]) * A + (sptr[-12] + sptr[-8] + sptr[-4] + sptr[0] + sptr[4] + sptr[8] + sptr[12]) * B) >> shift); //R
                            outptr[1] = (((sptr[-15] + sptr[17]) * A + (sptr[-11] + sptr[-7] + sptr[-3] + sptr[1] + sptr[5] + sptr[9] + sptr[13]) * B) >> shift); //G
                            outptr[2] = (((sptr[-14] + sptr[18]) * A + (sptr[-10] + sptr[-6] + sptr[-2] + sptr[2] + sptr[6] + sptr[10] + sptr[14]) * B) >> shift);
                            outptr[3] = sptr[3]; //A   //B
                            sptr += 4;
                            outptr += 4;
                        }
                        for (i = 4; i < width - 8; i++)
                        {
                            outptr -= 4;
                            outptr[0] = outptr[-16];
                            outptr[1] = outptr[-15];
                            outptr[2] = outptr[-14];
                            outptr[3] = outptr[-13]; //A
                        }
                        outptr -= 4;
                        outptr[0] = outptr[-12];
                        outptr[1] = outptr[-11];
                        outptr[2] = outptr[-10];
                        outptr[3] = outptr[-9];
                        outptr -= 4;
                        outptr[0] = outptr[-8];
                        outptr[1] = outptr[-7];
                        outptr[2] = outptr[-6];
                        outptr[3] = outptr[-5];
                        outptr -= 4;
                        outptr[0] = outptr[-4];
                        outptr[1] = outptr[-3];
                        outptr[2] = outptr[-2];
                        outptr[3] = outptr[-1];
                        break;
                    case -3: //blur
                        sptr += 3 * 4;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 4096 / 7 * diff / 256;
                        B = (4096 - A * 2) / 5;
                        for (i = 3; i < width - 3; i++)
                        {
                            outptr[0] = (((sptr[-12] + sptr[12]) * A + (sptr[-8] + sptr[-4] + sptr[0] + sptr[4] + sptr[8] ) * B) >> shift); //R
                            outptr[1] = (((sptr[-11] + sptr[13]) * A + (sptr[-7] + sptr[-3] + sptr[1] + sptr[5] + sptr[9] ) * B) >> shift); //G
                            outptr[2] = (((sptr[-10] + sptr[14]) * A + (sptr[-6] + sptr[-2] + sptr[2] + sptr[6] + sptr[10]) * B) >> shift);
                            outptr[3] = sptr[3]; //A   //B
                            sptr += 4;
                            outptr += 4;
                        }
                        for (i = 3; i < width - 6; i++)
                        {
                            outptr -= 4;
                            outptr[0] = outptr[-12];
                            outptr[1] = outptr[-11];
                            outptr[2] = outptr[-10];
                            outptr[3] = outptr[-9];
                        }
                        outptr -= 4;
                        outptr[0] = outptr[-8];
                        outptr[1] = outptr[-7];
                        outptr[2] = outptr[-6];
                        outptr[3] = outptr[-5];
                        outptr -= 4;
                        outptr[0] = outptr[-4];
                        outptr[1] = outptr[-3];
                        outptr[2] = outptr[-2];
                        outptr[3] = outptr[-1];
                        break;
                    case -2: //blur
                        sptr += 2 * 4;
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 0 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 8 * adiff + 4 * diff;
                        for (i = 2; i < width - 2; i++)
                        {
                            outptr[0] = ((sptr[-8] * A + sptr[-4] * B + sptr[0] * C + sptr[4] * B + sptr[8] * A) >> shift); //R
                            outptr[1] = ((sptr[-7] * A + sptr[-3] * B + sptr[1] * C + sptr[5] * B + sptr[9] * A) >> shift); //G
                            outptr[2] = ((sptr[-6] * A + sptr[-2] * B + sptr[2] * C + sptr[6] * B + sptr[10] * A) >> shift); //B
                            outptr[3] = sptr[3];
                            sptr += 4;
                            outptr += 4;
                        }
                        for (i = 2; i < width - 4; i++)
                        {
                            outptr -= 4;
                            outptr[0] = outptr[-8];
                            outptr[1] = outptr[-7];
                            outptr[2] = outptr[-6];
                            outptr[3] = outptr[-5];
                        }
                        outptr -= 4;
                        outptr[0] = outptr[-4];
                        outptr[1] = outptr[-3];
                        outptr[2] = outptr[-2];
                        outptr[3] = outptr[-1];
                        break;
                    case -1: //small blur
                        sptr += 2 * 4;
                        shift = 4 + 8; //A = 0; B = 1; C = 14   0,2,12,2,0
                        A = 0 * adiff + 0 * diff;
                        B = 0 * adiff + 4 * diff;
                        C = 16 * adiff + 8 * diff;
                        for (i = 2; i < width - 2; i++)
                        {
                            outptr[0] = ((sptr[-8] * A + sptr[-4] * B + sptr[0] * C + sptr[4] * B + sptr[8] * A) >> shift); //R
                            outptr[1] = ((sptr[-7] * A + sptr[-3] * B + sptr[1] * C + sptr[5] * B + sptr[9] * A) >> shift); //G
                            outptr[2] = ((sptr[-6] * A + sptr[-2] * B + sptr[2] * C + sptr[6] * B + sptr[10] * A) >> shift); //B
                            outptr[3] = sptr[3];
                            sptr += 4;
                            outptr += 4;
                        }
                        for (i = 2; i < width - 4; i++)
                        {
                            outptr -= 4;
                            outptr[0] = outptr[-8];
                            outptr[1] = outptr[-7];
                            outptr[2] = outptr[-6];
                            outptr[3] = outptr[-5];
                        }
                        outptr -= 4;
                        outptr[0] = outptr[-4];
                        outptr[1] = outptr[-3];
                        outptr[2] = outptr[-2];
                        outptr[3] = outptr[-1];
                        break;
                }
            }
            else if (taps == 3)
            {
                diff = (int)(256.0f * (-sharpness - (float)((int)(-sharpness * 0.999f))));
                adiff = 256 - diff;
                {
                    sptr += 2 * 4;
                    shift = 4 + 8; //A = 0; B = 1; C = 14   0,2,12,2,0
                    A = 0 * adiff + 2 * diff;
                    B = 0 * adiff + 4 * diff;
                    C = 16 * adiff + 4 * diff;
                    for (i = 2; i < width - 2; i++)
                    {
                        outptr[0] = ((sptr[-8] * A + sptr[-4] * B + sptr[0] * C + sptr[4] * B + sptr[8] * A) >> shift); //R
                        outptr[1] = ((sptr[-7] * A + sptr[-3] * B + sptr[1] * C + sptr[5] * B + sptr[9] * A) >> shift); //G
                        outptr[2] = ((sptr[-6] * A + sptr[-2] * B + sptr[2] * C + sptr[6] * B + sptr[10] * A) >> shift); //B
                        outptr[3] = sptr[3];
                        sptr += 4;
                        outptr += 4;
                    }
                    for (i = 2; i < width - 4; i++)
                    {
                        outptr -= 4;
                        outptr[0] = outptr[-8];
                        outptr[1] = outptr[-7];
                        outptr[2] = outptr[-6];
                        outptr[3] = outptr[-5];
                    }
                    outptr -= 4;
                    outptr[0] = outptr[-4];
                    outptr[1] = outptr[-3];
                    outptr[2] = outptr[-2];
                    outptr[3] = outptr[-1];
                }
            }
        }
    }
    else
    {
        /* Sharpen path. diff = fractional part of (sharpness*5) scaled to
           0..255; blends between adjacent sharpen kernel strengths. */
        diff = (int)(256.0f * (sharpness * 5.0f - (float)((int)(sharpness * 5.0f))));
        adiff = 256 - diff;
        if (pixelsize == 6)
        {
            if (taps == 5)
            {
                /* Save pixel 0 (copy) and pixel 1 (1,2,1 blur) for the final
                   edge restore, exactly as in FastSharpeningBlurHinplace. */
                rneg2 = *sptr++; //R
                gneg2 = *sptr++; //G
                bneg2 = *sptr++; //B
                // blur 1,2,1
                rneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //R
                gneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //G
                bneg1 = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //B
                /* NOTE(review): sharpness >= 0 in this branch, so the
                   negative cases below are unreachable here; they appear to
                   be retained blur kernels from the sister path. */
                switch ((int)(sharpness * 5.0))
                {
                    case -5://highest blur
                        shift = 4 + 8; //A = 2; B = 4; C = 4   2,3,2,3,2
                        A = 2 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 4 * adiff + 4 * diff;
                        break;
                    case -4: //blur
                        shift = 4 + 8; //A = 2; B = 4; C = 4   2,4,4,4,2
                        A = 2 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 4 * adiff + 4 * diff;
                        break;
                    case -3: //blur
                        shift = 4 + 8; //A = 2; B = 4; C = 4   2,4,4,4,2
                        A = 0 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 8 * adiff + 4 * diff;
                        break;
                    case -2: //blur
                        shift = 4 + 8; //A = 2; B = 4; C = 4   0,4,8,4,0
                        A = 0 * adiff + 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 8 * adiff + 4 * diff;
                        break;
                    case -1: //small blur
                        shift = 4 + 8; //A = 0; B = 1; C = 14   0,2,12,2,0
                        A = 0 * adiff + 0 * diff;
                        B = 0 * adiff + 4 * diff;
                        C = 16 * adiff + 8 * diff;
                        break;
                    case 0:
                        shift = 4 + 8; //A = 0; B = 0; C = 16<<8;
                        A = 0 * adiff - 1 * diff;
                        B = 0 * adiff + 4 * diff;
                        C = 16 * adiff + 10 * diff;
                        break;
                    case 1: //small sharpen
                        shift = 4 + 8; //A = -1<<8; B = 4<<8; C = 10<<8;
                        A = -1 * adiff - 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 10 * adiff + 12 * diff;
                        break;
                    case 2: //nice sharpen
                        shift = 4 + 8; //A = -2<<8; B = 4<<8; C = 12<<8;
                        A = -2 * adiff - 4 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 12 * adiff + 16 * diff;
                        break;
                    case 3: //highest sharpen
                        shift = 4 + 8; //A = -4<<8; B = 4<<8; C = 16<<8;
                        A = -4 * adiff - 8 * diff;
                        B = 4 * adiff + 8 * diff;
                        C = 16 * adiff + 16 * diff;
                        break;
                    case 4: //highest sharpen
                        shift = 4 + 8; //A = -8<<8; B = 8<<8; C = 16<<8;
                        A = -8 * adiff - 8 * diff;
                        B = 8 * adiff + 0 * diff;
                        C = 16 * adiff + 32 * diff;
                        break;
                    case 5: //highest sharpen
                        shift = 4; //A = -8; B = 0; C = 32;
                        A = -8;
                        B = 0;
                        C = 32;
                        break;
                }
                for (i = 2; i < width - 2; i++)
                {
                    /* Clamp the incoming right-edge sample to >= 0 in place
                       before it enters the kernel (negative overshoot guard). */
                    if (sptr[6] < 0) sptr[6] = 0;
                    if (sptr[7] < 0) sptr[7] = 0;
                    if (sptr[8] < 0) sptr[8] = 0;
                    outptr[0] = ((sptr[-6] * A + sptr[-3] * B + sptr[0] * C + sptr[3] * B + sptr[6] * A) >> shift); //R
                    outptr[1] = ((sptr[-5] * A + sptr[-2] * B + sptr[1] * C + sptr[4] * B + sptr[7] * A) >> shift); //G
                    outptr[2] = ((sptr[-4] * A + sptr[-1] * B + sptr[2] * C + sptr[5] * B + sptr[8] * A) >> shift); //B
                    sptr += 3;
                    outptr += 3;
                }
                // blur 1,2,1
                /* Right edge: next-to-last pixel blurred, last copied. */
                *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //R
                *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //G
                *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
                sptr++; //B
                *outptr++ = *sptr++; //R
                *outptr++ = *sptr++; //G
                *outptr++ = *sptr++; //B
                /* Output was written two pixels early: shift triples back
                   into place, then restore the saved left-edge pixels. */
                outptr += 5;
                for (i = 2; i < width; i++)
                {
                    //TODO: The GCC compiler warns that the operation on outptr may be undefined
                    //*outptr-- = outptr[-6];
                    //*outptr-- = outptr[-6];
                    //*outptr-- = outptr[-6];
                    outptr[0] = outptr[-6];
                    outptr[-1] = outptr[-7];
                    outptr[-2] = outptr[-8];
                    outptr -= 3;
                }
                *outptr-- = bneg1;
                *outptr-- = gneg1;
                *outptr-- = rneg1;
                *outptr-- = bneg2;
                *outptr-- = gneg2;
                *outptr-- = rneg2;
            }
            else if (taps == 3)
            {
                /* 3-tap sharpen (A, C, A): only A/C are set here; B unused. */
                switch ((int)(sharpness * 5.0))
                {
                    case 0:
                        shift = 4 + 8; //A = 0; B = 0; C = 16<<8;
                        A = 0 * adiff - 1 * diff;
                        C = 16 * adiff + 18 * diff;
                        break;
                    case 1: //small sharpen
                        shift = 4 + 8; //A = -1<<8; B = 4<<8; C = 10<<8;
                        A = -1 * adiff - 2 * diff;
                        C = 18 * adiff + 20 * diff;
                        break;
                    case 2: //nice sharpen
                        shift = 4 + 8; //A = -2<<8; B = 4<<8; C = 12<<8;
                        A = -2 * adiff - 4 * diff;
                        C = 20 * adiff + 24 * diff;
                        break;
                    case 3: //highest sharpen
                        shift = 4 + 8; //A = -4<<8; B = 4<<8; C = 16<<8;
                        A = -4 * adiff - 8 * diff;
                        C = 24 * adiff + 32 * diff;
                        break;
                    case 4: //highest sharpen
                        shift = 4 + 8; //A = -8<<8; B = 8<<8; C = 16<<8;
                        A = -8 * adiff - 8 * diff;
                        C = 32 * adiff + 32 * diff;
                        break;
                    case 5: //highest sharpen
                        shift = 4; //A = -8; B = 0; C = 32;
                        A = -8;
                        C = 32;
                        break;
                }
                // copy
                /* First pixel copied unfiltered (in-place, one pixel lag). */
                *outptr++ = sptr[0];
                sptr++; //R
                *outptr++ = sptr[0];
                sptr++; //G
                *outptr++ = sptr[0];
                sptr++; //B
                for (i = 1; i < width - 1; i++)
                {
                    if (sptr[3] < 0) sptr[3] = 0;
                    if (sptr[4] < 0) sptr[4] = 0;
                    if (sptr[5] < 0) sptr[5] = 0;
                    outptr[0] = ((sptr[-3] * A + sptr[0] * C + sptr[3] * A) >> shift); //R
                    outptr[1] = ((sptr[-2] * A + sptr[1] * C + sptr[4] * A) >> shift); //G
                    outptr[2] = ((sptr[-1] * A + sptr[2] * C + sptr[5] * A) >> shift); //B
                    sptr += 3;
                    outptr += 3;
                }
                // copy
                *outptr++ = sptr[0];
                sptr++; //R
                *outptr++ = sptr[0];
                sptr++; //G
                *outptr++ = sptr[0];
                sptr++; //B
            }
        }
        else
        {
            /* RGBA (stride 4 shorts): same sharpening, alpha copied through. */
            if (taps == 5)
            {
                rneg2 = *sptr++; //R
                gneg2 = *sptr++; //G
                bneg2 = *sptr++; //B
                aneg2 = *sptr++; //A
                // blur 1,2,1
                rneg1 = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //R
                gneg1 = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //G
                bneg1 = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //B
                aneg1 = *sptr++; //A
                switch ((int)(sharpness * 5.0))
                {
                    case 0:
                        shift = 4 + 8; //A = 0; B = 0; C = 16<<8;
                        A = 0 * adiff - 1 * diff;
                        B = 0 * adiff + 4 * diff;
                        C = 16 * adiff + 10 * diff;
                        break;
                    case 1: //small sharpen
                        shift = 4 + 8; //A = -1<<8; B = 4<<8; C = 10<<8;
                        A = -1 * adiff - 2 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 10 * adiff + 12 * diff;
                        break;
                    case 2: //nice sharpen
                        shift = 4 + 8; //A = -2<<8; B = 4<<8; C = 12<<8;
                        A = -2 * adiff - 4 * diff;
                        B = 4 * adiff + 4 * diff;
                        C = 12 * adiff + 16 * diff;
                        break;
                    case 3: //highest sharpen
                        shift = 4 + 8; //A = -4<<8; B = 4<<8; C = 16<<8;
                        A = -4 * adiff - 8 * diff;
                        B = 4 * adiff + 8 * diff;
                        C = 16 * adiff + 16 * diff;
                        break;
                    case 4: //highest sharpen
                        shift = 4 + 8; //A = -8<<8; B = 8<<8; C = 16<<8;
                        A = -8 * adiff - 8 * diff;
                        B = 8 * adiff + 0 * diff;
                        C = 16 * adiff + 32 * diff;
                        break;
                    case 5: //highest sharpen
                        shift = 4; //A = -8; B = 0; C = 32;
                        A = -8;
                        B = 0;
                        C = 32;
                        break;
                }
                for (i = 2; i < width - 2; i++)
                {
                    if (sptr[8] < 0) sptr[8] = 0;
                    if (sptr[9] < 0) sptr[9] = 0;
                    if (sptr[10] < 0) sptr[10] = 0;
                    outptr[0] = ((sptr[-8] * A + sptr[-4] * B + sptr[0] * C + sptr[4] * B + sptr[8] * A) >> shift); //R
                    outptr[1] = ((sptr[-7] * A + sptr[-3] * B + sptr[1] * C + sptr[5] * B + sptr[9] * A) >> shift); //G
                    outptr[2] = ((sptr[-6] * A + sptr[-2] * B + sptr[2] * C + sptr[6] * B + sptr[10] * A) >> shift); //B
                    outptr[3] = sptr[3]; //A
                    sptr += 4;
                    outptr += 4;
                }
                // blur 1,2,1
                *outptr++ = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //R
                *outptr++ = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //G
                *outptr++ = ((sptr[-4] + sptr[0] * 2 + sptr[4]) >> 2);
                sptr++; //B
                *outptr++ = *sptr++; //A
                *outptr++ = *sptr++; //R
                *outptr++ = *sptr++; //G
                *outptr++ = *sptr++; //B
                *outptr++ = *sptr++; //A
                /* Shift-by-two-pixels fix-up (stride 4), then edge restore. */
                outptr += 7;
                for (i = 2; i < width; i++)
                {
                    //TODO: The GCC compiler warns that the operation on outptr may be undefined
                    //*outptr-- = outptr[-8];
                    //*outptr-- = outptr[-8];
                    //*outptr-- = outptr[-8];
                    //*outptr-- = outptr[-8];
                    outptr[0] = outptr[-8];
                    outptr[-1] = outptr[-9];
                    outptr[-2] = outptr[-10];
                    outptr[-3] = outptr[-11];
                    outptr -= 4;
                }
                *outptr-- = aneg1;
                *outptr-- = bneg1;
                *outptr-- = gneg1;
                *outptr-- = rneg1;
                *outptr-- = aneg2;
                *outptr-- = bneg2;
                *outptr-- = gneg2;
                *outptr-- = rneg2;
            }
            else if (taps == 3)
            {
                switch ((int)(sharpness * 5.0))
                {
                    case 0:
                        shift = 4 + 8; //A = 0; B = 0; C = 16<<8;
                        A = 0 * adiff - 1 * diff;
                        C = 16 * adiff + 18 * diff;
                        break;
                    case 1: //small sharpen
                        shift = 4 + 8; //A = -1<<8; B = 4<<8; C = 10<<8;
                        A = -1 * adiff - 2 * diff;
                        C = 18 * adiff + 20 * diff;
                        break;
                    case 2: //nice sharpen
                        shift = 4 + 8; //A = -2<<8; B = 4<<8; C = 12<<8;
                        A = -2 * adiff - 4 * diff;
                        C = 20 * adiff + 24 * diff;
                        break;
                    case 3: //highest sharpen
                        shift = 4 + 8; //A = -4<<8; B = 4<<8; C = 16<<8;
                        A = -4 * adiff - 8 * diff;
                        C = 24 * adiff + 32 * diff;
                        break;
                    case 4: //highest sharpen
                        shift = 4 + 8; //A = -8<<8; B = 8<<8; C = 16<<8;
                        A = -8 * adiff - 8 * diff;
                        C = 32 * adiff + 32 * diff;
                        break;
                    case 5: //highest sharpen
                        shift = 4; //A = -8; B = 0; C = 32;
                        A = -8;
                        C = 32;
                        break;
                }
                // copy
                outptr[0] = sptr[0]; //R
                outptr[1] = sptr[1]; //G
                outptr[2] = sptr[2]; //B
                outptr[3] = sptr[3]; //A
                outptr += 4;
                sptr += 4;
                for (i = 1; i < width - 1; i++)
                {
                    if (sptr[4] < 0) sptr[4] = 0;
                    if (sptr[5] < 0) sptr[5] = 0;
                    if (sptr[6] < 0) sptr[6] = 0;
                    outptr[0] = ((sptr[-4] * A + sptr[0] * C + sptr[4] * A) >> shift); //R
                    outptr[1] = ((sptr[-3] * A + sptr[1] * C + sptr[5] * A) >> shift); //G
                    outptr[2] = ((sptr[-2] * A + sptr[2] * C + sptr[6] * A) >> shift); //B
                    outptr[3] = sptr[3]; //A
                    outptr += 4;
                    sptr += 4;
                }
                // copy
                outptr[0] = sptr[0]; //R
                outptr[1] = sptr[1]; //G
                outptr[2] = sptr[2]; //B
                outptr[3] = sptr[3]; //A
                outptr += 4;
                sptr += 4;
            }
        }
    }
}
// Vertical sharpening / blur filter (float SSE2 path) over five consecutive
// image rows of 16-bit samples, 3 channels per pixel (RGB-style "WP13" data —
// presumably 13-bit white point; TODO confirm naming).
//
// Aptr..Eptr   : pointers to five vertically adjacent rows; Cptr is the center
//                row being filtered, B/D are +-1 row, A/E are +-2 rows.
// pitch        : row pitch in BYTES (pitch >> 1 converts to shorts), used to
//                reach rows +-3 and +-4 for the wider blur kernels.
// edgenear     : nonzero near the top/bottom image edge; restricts the blur
//                to the 5-tap kernel so it never reads outside the frame.
// output       : destination row (written with unaligned stores).
// pixels       : pixels per row; the loops process pixels * 3 short samples.
// sharpness    : filter strength; >= 0 selects sharpening, < 0 selects blur.
//                Magnitude is scaled by 5 (sharpen) or 4 (blur) and split into
//                an integer kernel index plus a fractional blend (diff/adiff)
//                so strength varies continuously between the preset kernels.
// resolution / channel_blend_type : select the tap count (5, 3 or 1) so that
//                half/quarter resolution or stacked 3D decodes are filtered
//                proportionally less.
//
// Fixed-point trick used throughout: each 16-bit sample is unpacked into the
// HIGH half of a 32-bit lane (zero in the low half), i.e. multiplied by 2^16.
// The lanes are converted to float, the weighted sum of rows is accumulated,
// converted back with _mm_cvtps_epi32 (round-to-nearest), then shifted right
// arithmetically by 16 to return to the 16-bit scale with sign preserved.
// _mm_packs_epi32 takes the "b" (unpacklo = low 4 samples) register first so
// the packed result restores the original sample order.
//
// NOTE(review): _mm_load_si128 requires 16-byte alignment — assumes all row
// pointers (including the pitch-derived A2/A1/E1/E2 rows) are aligned; verify
// against the allocator/caller.
void FastSharpeningBlurVWP13(short *Aptr,
                             short *Bptr,
                             short *Cptr,
                             short *Dptr,
                             short *Eptr,
                             int pitch,
                             int edgenear,
                             short *output,
                             int pixels,
                             float sharpness,
                             int resolution,
                             int channel_blend_type)
{
    int i = 0, taps = 1;
    __m128i zero_epi16;
    __m128 a, b, c;                 // outer / middle / center tap weights
    int FIRsize = 5;                // vertical kernel height used by blur path
    float af, bf, cf;               // scalar tap weights before SSE broadcast
    // Fractional part of sharpness*5: blends between adjacent preset kernels.
    float diff = sharpness * 5.0f - (float)((int)(sharpness * 5.0f));
    float adiff = 1.0f - diff;
    // Choose tap count from decode resolution; stacked/interleaved 3D blends
    // halve the effective vertical resolution, so drop one kernel size.
    switch (resolution)
    {
        case DECODED_RESOLUTION_FULL:
        case DECODED_RESOLUTION_FULL_DEBAYER:
        case DECODED_RESOLUTION_HALF_HORIZONTAL:
            taps = 5;
            if (	channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
                    channel_blend_type == BLEND_LINE_INTERLEAVED ||
                    channel_blend_type == BLEND_FREEVIEW)
            {
                taps = 3;
            }
            break;
        case DECODED_RESOLUTION_HALF:
        case DECODED_RESOLUTION_HALF_NODEBAYER:
        case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        case DECODED_RESOLUTION_HALF_VERTICAL:
            taps = 3;
            if (	channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
                    channel_blend_type == BLEND_LINE_INTERLEAVED ||
                    channel_blend_type == BLEND_FREEVIEW)
            {
                taps = 1;
            }
            break;
        case DECODED_RESOLUTION_QUARTER:
        case DECODED_RESOLUTION_LOWPASS_ONLY:
        case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
            taps = 1;
            break;
    }
    if (sharpness < 0.0)
    {
        // ---- Blur path: pick a symmetric low-pass kernel (height FIRsize). ----
        if (taps == 5)
        {
            // Re-derive the fractional blend on a *4 scale for the blur presets.
            diff = -sharpness * 4.0f - (float)((int)(-sharpness * 4.0f));
            adiff = 1.0f - diff;
            switch (-1 + (int)(sharpness * 4.0f))
            {
                case -5://highest blur
                    diff = 1.0f;
                    // intentional fall-through to the 9-tap kernel with full blend
                case -4: //blur
                    FIRsize = 9;
                    af = 1.0f / 9.0f * diff;
                    bf = (1.0f - af * 2.0f) / 7.0f;   // remaining weight spread over 7 inner taps
                    if (edgenear)
                    {
                        // Too close to the frame edge for +-4 rows: fall back to
                        // a uniform 5-tap box blur.
                        FIRsize = 5;
                        af = 0.2f;
                        bf = 0.2f;
                        cf = 0.2f;
                    }
                    break;
                case -3: //blur
                    FIRsize = 7;
                    af = 1.0f / 7.0f * diff;
                    bf = (1.0f - af * 2.0f) / 5.0f;
                    if (edgenear)
                    {
                        FIRsize = 5;
                        af = 0.2f;
                        bf = 0.2f;
                        cf = 0.2f;
                    }
                    break;
                case -2: //blur
                    FIRsize = 5;
                    af = 0.00f * adiff + 0.125f * diff;
                    bf = 0.25f * adiff + 0.25f * diff;
                    cf = 0.50f * adiff + 0.25f * diff;
                    break;
                default:
                case -1: //blur
                    // Mildest blur: blends from identity (C only) toward a
                    // gentle 1/4,1/2,1/4-style kernel.
                    FIRsize = 5;
                    af = 0.00f * adiff + 0.00f * diff;
                    bf = 0.00f * adiff + 0.25f * diff;
                    cf = 1.00f * adiff + 0.50f * diff;
                    break;
            }
        }
        else if (taps == 3)
        {
            diff = -sharpness;
            adiff = 1.0f - diff;
            FIRsize = 5;
            af = 0.00f * adiff + 0.125f * diff;
            bf = 0.00f * adiff + 0.25f * diff;
            cf = 1.00f * adiff + 0.25f * diff;
        }
        else
        {
            // taps == 1: no vertical filtering — pass the center row through.
            // (pixels * 3 channels * 2 bytes per short)
            memcpy(output, Cptr, pixels * 3 * 2);
            FIRsize = 1;
        }
        switch (FIRsize)
        {
            case 9:
            {
                // 9-tap blur: needs rows +-3 and +-4, reached via pitch.
                // NOTE(review): masking with 0xfff8 rounds down to a multiple
                // of 8 but ALSO truncates above 65535 samples — presumably row
                // widths never reach that; confirm for large frames.
                int pixels8 = (pixels * 3) & 0xfff8;
                a = _mm_set1_ps(af);
                b = _mm_set1_ps(bf);
                short *A2ptr = Aptr - (pitch >> 1) * 2;   // row -4
                short *A1ptr = Aptr - (pitch >> 1);       // row -3
                short *E1ptr = Eptr + (pitch >> 1);       // row +3
                short *E2ptr = Eptr + (pitch >> 1) * 2;   // row +4
                zero_epi16 = _mm_set1_epi16(0);
                for (i = 0; i < pixels8; i += 8)
                {
                    __m128i mix_epi16;
                    __m128i A2_epi16 = _mm_load_si128((__m128i *)A2ptr);
                    __m128i A1_epi16 = _mm_load_si128((__m128i *)A1ptr);
                    __m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
                    __m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
                    __m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
                    __m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
                    __m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
                    __m128i E1_epi16 = _mm_load_si128((__m128i *)E1ptr);
                    __m128i E2_epi16 = _mm_load_si128((__m128i *)E2ptr);
                    A2ptr += 8;
                    A1ptr += 8;
                    Aptr += 8;
                    Bptr += 8;
                    Cptr += 8;
                    Dptr += 8;
                    Eptr += 8;
                    E1ptr += 8;
                    E2ptr += 8;
                    // Unpack each 16-bit sample into the high half of a 32-bit
                    // lane (sample << 16): "a" regs = high 4 samples, "b" = low 4.
                    __m128i A2_epi32a = _mm_unpackhi_epi16(zero_epi16, A2_epi16);
                    __m128i A1_epi32a = _mm_unpackhi_epi16(zero_epi16, A1_epi16);
                    __m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
                    __m128i E1_epi32a = _mm_unpackhi_epi16(zero_epi16, E1_epi16);
                    __m128i E2_epi32a = _mm_unpackhi_epi16(zero_epi16, E2_epi16);
                    __m128i A2_epi32b = _mm_unpacklo_epi16(zero_epi16, A2_epi16);
                    __m128i A1_epi32b = _mm_unpacklo_epi16(zero_epi16, A1_epi16);
                    __m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
                    __m128i E1_epi32b = _mm_unpacklo_epi16(zero_epi16, E1_epi16);
                    __m128i E2_epi32b = _mm_unpacklo_epi16(zero_epi16, E2_epi16);
                    __m128 A2aps = _mm_cvtepi32_ps(A2_epi32a);
                    __m128 A1aps = _mm_cvtepi32_ps(A1_epi32a);
                    __m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
                    __m128 Baps = _mm_cvtepi32_ps(B_epi32a);
                    __m128 Caps = _mm_cvtepi32_ps(C_epi32a);
                    __m128 Daps = _mm_cvtepi32_ps(D_epi32a);
                    __m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
                    __m128 E1aps = _mm_cvtepi32_ps(E1_epi32a);
                    __m128 E2aps = _mm_cvtepi32_ps(E2_epi32a);
                    __m128 A2bps = _mm_cvtepi32_ps(A2_epi32b);
                    __m128 A1bps = _mm_cvtepi32_ps(A1_epi32b);
                    __m128 Abps = _mm_cvtepi32_ps(A_epi32b);
                    __m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
                    __m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
                    __m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
                    __m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
                    __m128 E1bps = _mm_cvtepi32_ps(E1_epi32b);
                    __m128 E2bps = _mm_cvtepi32_ps(E2_epi32b);
                    // Weights: outermost rows (+-4) get 'a', the seven inner rows 'b'.
                    A2aps = _mm_mul_ps(A2aps, a);
                    A1aps = _mm_mul_ps(A1aps, b);
                    Aaps = _mm_mul_ps(Aaps, b);
                    Baps = _mm_mul_ps(Baps, b);
                    Caps = _mm_mul_ps(Caps, b);
                    Daps = _mm_mul_ps(Daps, b);
                    Eaps = _mm_mul_ps(Eaps, b);
                    E1aps = _mm_mul_ps(E1aps, b);
                    E2aps = _mm_mul_ps(E2aps, a);
                    A2bps = _mm_mul_ps(A2bps, a);
                    A1bps = _mm_mul_ps(A1bps, b);
                    Abps = _mm_mul_ps(Abps, b);
                    Bbps = _mm_mul_ps(Bbps, b);
                    Cbps = _mm_mul_ps(Cbps, b);
                    Dbps = _mm_mul_ps(Dbps, b);
                    Ebps = _mm_mul_ps(Ebps, b);
                    E1bps = _mm_mul_ps(E1bps, b);
                    E2bps = _mm_mul_ps(E2bps, a);
                    Aaps = _mm_add_ps(Aaps, A2aps);
                    Aaps = _mm_add_ps(Aaps, A1aps);
                    Aaps = _mm_add_ps(Aaps, Baps);
                    Aaps = _mm_add_ps(Aaps, Caps);
                    Aaps = _mm_add_ps(Aaps, Daps);
                    Aaps = _mm_add_ps(Aaps, Eaps);
                    Aaps = _mm_add_ps(Aaps, E1aps);
                    Aaps = _mm_add_ps(Aaps, E2aps);
                    Abps = _mm_add_ps(Abps, A2bps);
                    Abps = _mm_add_ps(Abps, A1bps);
                    Abps = _mm_add_ps(Abps, Bbps);
                    Abps = _mm_add_ps(Abps, Cbps);
                    Abps = _mm_add_ps(Abps, Dbps);
                    Abps = _mm_add_ps(Abps, Ebps);
                    Abps = _mm_add_ps(Abps, E1bps);
                    Abps = _mm_add_ps(Abps, E2bps);
                    // Round back to integer and undo the <<16 scaling.
                    C_epi32a = _mm_cvtps_epi32(Aaps);
                    C_epi32b = _mm_cvtps_epi32(Abps);
                    C_epi32a = _mm_srai_epi32(C_epi32a, 16);
                    C_epi32b = _mm_srai_epi32(C_epi32b, 16);
                    mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);  // saturating pack, low half first
                    _mm_storeu_si128((__m128i *)output, mix_epi16);
                    output += 8;
                }
            }
            break;
            case 7:
            {
                // 7-tap blur: rows -3..+3 (A1/E1 derived from pitch).
                int pixels8 = (pixels * 3) & 0xfff8;
                a = _mm_set1_ps(af);
                b = _mm_set1_ps(bf);
                short *A1ptr = Aptr - (pitch >> 1);
                short *E1ptr = Eptr + (pitch >> 1);
                zero_epi16 = _mm_set1_epi16(0);
                for (i = 0; i < pixels8; i += 8)
                {
                    __m128i mix_epi16;
                    __m128i A1_epi16 = _mm_load_si128((__m128i *)A1ptr);
                    __m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
                    __m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
                    __m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
                    __m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
                    __m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
                    __m128i E1_epi16 = _mm_load_si128((__m128i *)E1ptr);
                    A1ptr += 8;
                    Aptr += 8;
                    Bptr += 8;
                    Cptr += 8;
                    Dptr += 8;
                    Eptr += 8;
                    E1ptr += 8;
                    __m128i A1_epi32a = _mm_unpackhi_epi16(zero_epi16, A1_epi16);
                    __m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
                    __m128i E1_epi32a = _mm_unpackhi_epi16(zero_epi16, E1_epi16);
                    __m128i A1_epi32b = _mm_unpacklo_epi16(zero_epi16, A1_epi16);
                    __m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
                    __m128i E1_epi32b = _mm_unpacklo_epi16(zero_epi16, E1_epi16);
                    __m128 A1aps = _mm_cvtepi32_ps(A1_epi32a);
                    __m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
                    __m128 Baps = _mm_cvtepi32_ps(B_epi32a);
                    __m128 Caps = _mm_cvtepi32_ps(C_epi32a);
                    __m128 Daps = _mm_cvtepi32_ps(D_epi32a);
                    __m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
                    __m128 E1aps = _mm_cvtepi32_ps(E1_epi32a);
                    __m128 A1bps = _mm_cvtepi32_ps(A1_epi32b);
                    __m128 Abps = _mm_cvtepi32_ps(A_epi32b);
                    __m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
                    __m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
                    __m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
                    __m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
                    __m128 E1bps = _mm_cvtepi32_ps(E1_epi32b);
                    A1aps = _mm_mul_ps(A1aps, a);
                    Aaps = _mm_mul_ps(Aaps, b);
                    Baps = _mm_mul_ps(Baps, b);
                    Caps = _mm_mul_ps(Caps, b);
                    Daps = _mm_mul_ps(Daps, b);
                    Eaps = _mm_mul_ps(Eaps, b);
                    E1aps = _mm_mul_ps(E1aps, a);
                    A1bps = _mm_mul_ps(A1bps, a);
                    Abps = _mm_mul_ps(Abps, b);
                    Bbps = _mm_mul_ps(Bbps, b);
                    Cbps = _mm_mul_ps(Cbps, b);
                    Dbps = _mm_mul_ps(Dbps, b);
                    Ebps = _mm_mul_ps(Ebps, b);
                    E1bps = _mm_mul_ps(E1bps, a);
                    Aaps = _mm_add_ps(Aaps, A1aps);
                    Aaps = _mm_add_ps(Aaps, Baps);
                    Aaps = _mm_add_ps(Aaps, Caps);
                    Aaps = _mm_add_ps(Aaps, Daps);
                    Aaps = _mm_add_ps(Aaps, Eaps);
                    Aaps = _mm_add_ps(Aaps, E1aps);
                    Abps = _mm_add_ps(Abps, A1bps);
                    Abps = _mm_add_ps(Abps, Bbps);
                    Abps = _mm_add_ps(Abps, Cbps);
                    Abps = _mm_add_ps(Abps, Dbps);
                    Abps = _mm_add_ps(Abps, Ebps);
                    Abps = _mm_add_ps(Abps, E1bps);
                    C_epi32a = _mm_cvtps_epi32(Aaps);
                    C_epi32b = _mm_cvtps_epi32(Abps);
                    C_epi32a = _mm_srai_epi32(C_epi32a, 16);
                    C_epi32b = _mm_srai_epi32(C_epi32b, 16);
                    mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
                    _mm_storeu_si128((__m128i *)output, mix_epi16);
                    output += 8;
                }
            }
            break;
            case 5:
            {
                // 5-tap blur using only the supplied rows A..E.
                int pixels8 = (pixels * 3) & 0xfff8;
                a = _mm_set1_ps(af);
                b = _mm_set1_ps(bf);
                c = _mm_set1_ps(cf);
                zero_epi16 = _mm_set1_epi16(0);
                for (i = 0; i < pixels8; i += 8)
                {
                    __m128i mix_epi16;
                    __m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
                    __m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
                    __m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
                    __m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
                    __m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
                    Aptr += 8;
                    Bptr += 8;
                    Cptr += 8;
                    Dptr += 8;
                    Eptr += 8;
                    __m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
                    __m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
                    __m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
                    __m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
                    __m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
                    __m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
                    __m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
                    __m128 Baps = _mm_cvtepi32_ps(B_epi32a);
                    __m128 Caps = _mm_cvtepi32_ps(C_epi32a);
                    __m128 Daps = _mm_cvtepi32_ps(D_epi32a);
                    __m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
                    __m128 Abps = _mm_cvtepi32_ps(A_epi32b);
                    __m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
                    __m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
                    __m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
                    __m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
                    // Symmetric weights: a*A + b*B + c*C + b*D + a*E.
                    Aaps = _mm_mul_ps(Aaps, a);
                    Baps = _mm_mul_ps(Baps, b);
                    Caps = _mm_mul_ps(Caps, c);
                    Daps = _mm_mul_ps(Daps, b);
                    Eaps = _mm_mul_ps(Eaps, a);
                    Abps = _mm_mul_ps(Abps, a);
                    Bbps = _mm_mul_ps(Bbps, b);
                    Cbps = _mm_mul_ps(Cbps, c);
                    Dbps = _mm_mul_ps(Dbps, b);
                    Ebps = _mm_mul_ps(Ebps, a);
                    Aaps = _mm_add_ps(Aaps, Baps);
                    Aaps = _mm_add_ps(Aaps, Caps);
                    Aaps = _mm_add_ps(Aaps, Daps);
                    Aaps = _mm_add_ps(Aaps, Eaps);
                    Abps = _mm_add_ps(Abps, Bbps);
                    Abps = _mm_add_ps(Abps, Cbps);
                    Abps = _mm_add_ps(Abps, Dbps);
                    Abps = _mm_add_ps(Abps, Ebps);
                    C_epi32a = _mm_cvtps_epi32(Aaps);
                    C_epi32b = _mm_cvtps_epi32(Abps);
                    C_epi32a = _mm_srai_epi32(C_epi32a, 16);
                    C_epi32b = _mm_srai_epi32(C_epi32b, 16);
                    mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
                    _mm_storeu_si128((__m128i *)output, mix_epi16);
                    output += 8;
                }
            }
            break;
        }
    }
    else
    {
        // ---- Sharpen path (sharpness >= 0). ----
        if (taps == 5)
        {
            int pixels8 = (pixels * 3) & 0xfff8;
            // Select sharpening kernel strength; diff/adiff blend between
            // the preset below and the next stronger one.
            switch ((int)(sharpness * 5.0))
            {
                case 0:
                default:
                    af = 0.000f * adiff - 0.0625f * diff;
                    bf = 0.000f * adiff + 0.2500f * diff;
                    cf = 1.000f * adiff + 0.6250f * diff;
                    break;
                case 1: //small sharpen
                    //a = _mm_set1_ps(-0.0625);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(0.625);
                    af = -0.0625f * adiff - 0.1250f * diff;
                    bf = 0.2500f * adiff + 0.2500f * diff;
                    cf = 0.6250f * adiff + 0.7500f * diff;
                    break;
                case 2: //nice sharpen
                    //a = _mm_set1_ps(-0.125);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(0.75);
                    af = -0.1250f * adiff - 0.2500f * diff;
                    bf = 0.2500f * adiff + 0.2500f * diff;
                    cf = 0.7500f * adiff + 1.0000f * diff;
                    break;
                case 3: //higher
                    //a = _mm_set1_ps(-0.25);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(1.0);
                    af = -0.2500f * adiff - 0.5000f * diff;
                    bf = 0.2500f * adiff + 0.5000f * diff;
                    cf = 1.0000f * adiff + 1.0000f * diff;
                    break;
                case 4: //overkill sharpen
                    //a = _mm_set1_ps(-0.5);
                    //b = _mm_set1_ps(0.5);
                    //c = _mm_set1_ps(1.0);
                    af = -0.5000f * adiff - 0.5000f * diff;
                    bf = 0.5000f * adiff + 0.0000f * diff;
                    cf = 1.0000f * adiff + 2.0000f * diff;
                    break;
                case 5: //highest sharpen
                    //a = _mm_set1_ps(-0.5);
                    //b = _mm_set1_ps(0.0);
                    //c = _mm_set1_ps(2.0);
                    af = -0.5000f;
                    bf = 0.0000f;
                    cf = 2.0000f;
                    break;
            }
            a = _mm_set1_ps(af);
            b = _mm_set1_ps(bf);
            c = _mm_set1_ps(cf);
            zero_epi16 = _mm_set1_epi16(0);
            // Remnant of the earlier integer (non-float) implementation,
            // kept for reference.
            /*Bset = _mm_set1_epi16(B);
            Cset = _mm_set1_epi16(C);
            shiftsse2 = shift - prescale;
            if(preshift)
            {
                Bset = _mm_srai_epi16(Bset, preshift);
                Cset = _mm_srai_epi16(Cset, preshift);
                shiftsse2 -= preshift;
            }
            */
            for (i = 0; i < pixels8; i += 8)
            {
                __m128i mix_epi16;
                __m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
                __m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
                __m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
                __m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
                __m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
                Aptr += 8;
                Bptr += 8;
                Cptr += 8;
                Dptr += 8;
                Eptr += 8;
                __m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
                __m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
                __m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
                __m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
                __m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
                __m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
                __m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
                __m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
                __m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
                __m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
                __m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
                __m128 Baps = _mm_cvtepi32_ps(B_epi32a);
                __m128 Caps = _mm_cvtepi32_ps(C_epi32a);
                __m128 Daps = _mm_cvtepi32_ps(D_epi32a);
                __m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
                __m128 Abps = _mm_cvtepi32_ps(A_epi32b);
                __m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
                __m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
                __m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
                __m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
                Aaps = _mm_mul_ps(Aaps, a);
                Baps = _mm_mul_ps(Baps, b);
                Caps = _mm_mul_ps(Caps, c);
                Daps = _mm_mul_ps(Daps, b);
                Eaps = _mm_mul_ps(Eaps, a);
                Abps = _mm_mul_ps(Abps, a);
                Bbps = _mm_mul_ps(Bbps, b);
                Cbps = _mm_mul_ps(Cbps, c);
                Dbps = _mm_mul_ps(Dbps, b);
                Ebps = _mm_mul_ps(Ebps, a);
                Aaps = _mm_add_ps(Aaps, Baps);
                Aaps = _mm_add_ps(Aaps, Caps);
                Aaps = _mm_add_ps(Aaps, Daps);
                Aaps = _mm_add_ps(Aaps, Eaps);
                Abps = _mm_add_ps(Abps, Bbps);
                Abps = _mm_add_ps(Abps, Cbps);
                Abps = _mm_add_ps(Abps, Dbps);
                Abps = _mm_add_ps(Abps, Ebps);
                C_epi32a = _mm_cvtps_epi32(Aaps);
                C_epi32b = _mm_cvtps_epi32(Abps);
                C_epi32a = _mm_srai_epi32(C_epi32a, 16);
                C_epi32b = _mm_srai_epi32(C_epi32b, 16);
                mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
                // Disabled integer-path version of the same 5-tap sharpen.
                /*
                if(prescale)
                {
                    A_epi16 = _mm_srai_epi16(A_epi16,prescale);
                    B_epi16 = _mm_srai_epi16(B_epi16,prescale);
                    C_epi16 = _mm_srai_epi16(C_epi16,prescale);
                    D_epi16 = _mm_srai_epi16(D_epi16,prescale);
                    E_epi16 = _mm_srai_epi16(E_epi16,prescale);
                }
                if(preshift)
                {
                    A_epi16 = _mm_srai_epi16(A_epi16, preshift);
                    E_epi16 = _mm_srai_epi16(E_epi16, preshift);
                }
                mix_epi16 = _mm_mullo_epi16(C_epi16, Cset);
                mix_epi16 = _mm_subs_epi16(mix_epi16, A_epi16);
                mix_epi16 = _mm_subs_epi16(mix_epi16, E_epi16);
                tmp_epi16 = _mm_mullo_epi16(B_epi16, Bset);
                mix_epi16 = _mm_adds_epi16(mix_epi16, tmp_epi16);
                tmp_epi16 = _mm_mullo_epi16(D_epi16, Bset);
                mix_epi16 = _mm_adds_epi16(mix_epi16, tmp_epi16);
                mix_epi16 = _mm_srai_epi16(mix_epi16, shiftsse2);
                */
                _mm_storeu_si128((__m128i *)output, mix_epi16);
                output += 8;
            }
        }
        else if (taps == 3)
        {
            // 3-tap sharpen: only rows B, C, D contribute (a*B + c*C + a*D).
            int pixels8 = (pixels * 3) & 0xfff8;
            switch ((int)(sharpness * 5.0))
            {
                case 0:
                default:
                    af = 0.000f * adiff - 0.0625f * diff;
                    cf = 1.000f * adiff + 1.1250f * diff;
                    break;
                case 1: //small sharpen
                    //a = _mm_set1_ps(-0.0625);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(0.625);
                    af = -0.0625f * adiff - 0.1250f * diff;
                    cf = 1.1250f * adiff + 1.2500f * diff;
                    break;
                case 2: //nice sharpen
                    //a = _mm_set1_ps(-0.125);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(0.75);
                    af = -0.1250f * adiff - 0.2500f * diff;
                    cf = 1.2500f * adiff + 1.5000f * diff;
                    break;
                case 3: //higher
                    //a = _mm_set1_ps(-0.25);
                    //b = _mm_set1_ps(0.25);
                    //c = _mm_set1_ps(1.0);
                    af = -0.2500f * adiff - 0.5000f * diff;
                    cf = 1.5000f * adiff + 2.0000f * diff;
                    break;
                case 4: //overkill sharpen
                    //a = _mm_set1_ps(-0.5);
                    //b = _mm_set1_ps(0.5);
                    //c = _mm_set1_ps(1.0);
                    af = -0.5000f * adiff - 0.5000f * diff;
                    cf = 2.0000f * adiff + 2.0000f * diff;
                    break;
                case 5: //highest sharpen
                    //a = _mm_set1_ps(-0.5);
                    //b = _mm_set1_ps(0.0);
                    //c = _mm_set1_ps(2.0);
                    af = -0.5000f;
                    cf = 2.0000f;
                    break;
            }
            a = _mm_set1_ps(af);
            c = _mm_set1_ps(cf);
            zero_epi16 = _mm_set1_epi16(0);
            for (i = 0; i < pixels8; i += 8)
            {
                __m128i mix_epi16;
                __m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
                __m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
                __m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
                // A/E rows are unused in the 3-tap kernel; their pointers are
                // still advanced in step with the others.
                Aptr += 8;
                Bptr += 8;
                Cptr += 8;
                Dptr += 8;
                Eptr += 8;
                __m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
                __m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
                __m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
                __m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
                __m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
                __m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
                __m128 Baps = _mm_cvtepi32_ps(B_epi32a);
                __m128 Caps = _mm_cvtepi32_ps(C_epi32a);
                __m128 Daps = _mm_cvtepi32_ps(D_epi32a);
                __m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
                __m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
                __m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
                Baps = _mm_mul_ps(Baps, a);
                Caps = _mm_mul_ps(Caps, c);
                Daps = _mm_mul_ps(Daps, a);
                Bbps = _mm_mul_ps(Bbps, a);
                Cbps = _mm_mul_ps(Cbps, c);
                Dbps = _mm_mul_ps(Dbps, a);
                Baps = _mm_add_ps(Baps, Caps);
                Baps = _mm_add_ps(Baps, Daps);
                Bbps = _mm_add_ps(Bbps, Cbps);
                Bbps = _mm_add_ps(Bbps, Dbps);
                C_epi32a = _mm_cvtps_epi32(Baps);
                C_epi32b = _mm_cvtps_epi32(Bbps);
                C_epi32a = _mm_srai_epi32(C_epi32a, 16);
                C_epi32b = _mm_srai_epi32(C_epi32b, 16);
                mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
                _mm_storeu_si128((__m128i *)output, mix_epi16);
                output += 8;
            }
        }
        else
        {
            // taps == 1: no filtering — copy the center row through.
            memcpy(output, Cptr, pixels * 3 * 2);
        }
    }
}
void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type)
{
int i = 0, taps = 1;
__m128i zero_epi16;
__m128 a, b, c;
__m128i maskA_epi16 = _mm_set_epi16((short) -1, 0, 0, 0, (short) -1, 0, 0, 0);
__m128i maskRGB_epi16 = _mm_set1_epi16((short) -1);
float af, bf, cf;
int FIRsize = 5;
float diff = sharpness * 5.0f - (float)((int)(sharpness * 5.0f));
float adiff = 1.0f - diff;
maskRGB_epi16 = _mm_sub_epi16 (maskRGB_epi16, maskA_epi16);
switch (resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_FULL_DEBAYER:
case DECODED_RESOLUTION_HALF_HORIZONTAL:
taps = 5;
if ( channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED ||
channel_blend_type == BLEND_FREEVIEW)
{
taps = 3;
}
break;
case DECODED_RESOLUTION_HALF:
case DECODED_RESOLUTION_HALF_NODEBAYER:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
case DECODED_RESOLUTION_HALF_VERTICAL:
taps = 3;
if ( channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED ||
channel_blend_type == BLEND_FREEVIEW)
{
taps = 1;
}
break;
case DECODED_RESOLUTION_QUARTER:
case DECODED_RESOLUTION_LOWPASS_ONLY:
case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
taps = 1;
break;
}
if (sharpness < 0.0)
{
if (taps == 5)
{
diff = -sharpness * 4.0f - (float)((int)(-sharpness * 4.0f));
adiff = 1.0f - diff;
switch (-1 + (int)(sharpness * 4.0f))
{
case -5://highest blur
diff = 1.0f;
case -4: //blur
FIRsize = 9;
af = 1.0f / 9.0f * diff;
bf = (1.0f - af * 2.0f) / 7.0f;
if (edgenear)
{
FIRsize = 5;
af = 0.2f;
bf = 0.2f;
cf = 0.2f;
}
break;
case -3: //blur
FIRsize = 7;
af = 1.0f / 7.0f * diff;
bf = (1.0f - af * 2.0f) / 5.0f;
if (edgenear)
{
FIRsize = 5;
af = 0.2f;
bf = 0.2f;
cf = 0.2f;
}
break;
case -2: //blur
FIRsize = 5;
af = 0.00f * adiff + 0.125f * diff;
bf = 0.25f * adiff + 0.25f * diff;
cf = 0.50f * adiff + 0.25f * diff;
break;
default:
case -1: //blur
FIRsize = 5;
af = 0.00f * adiff + 0.00f * diff;
bf = 0.00f * adiff + 0.25f * diff;
cf = 1.00f * adiff + 0.50f * diff;
break;
}
}
else if (taps == 3)
{
diff = -sharpness;
adiff = 1.0f - diff;
FIRsize = 5;
af = 0.00f * adiff + 0.125f * diff;
bf = 0.00f * adiff + 0.25f * diff;
cf = 1.00f * adiff + 0.25f * diff;
}
else
{
memcpy(output, Cptr, pixels * 3 * 2);
FIRsize = 1;
}
switch (FIRsize)
{
case 9:
{
a = _mm_set1_ps(af);
b = _mm_set1_ps(bf);
short *A2ptr = Aptr - (pitch >> 1) * 2;
short *A1ptr = Aptr - (pitch >> 1);
short *E1ptr = Eptr + (pitch >> 1);
short *E2ptr = Eptr + (pitch >> 1) * 2;
zero_epi16 = _mm_set1_epi16(0);
for (i = 0; i < pixels * 4; i += 8)
{
__m128i mix_epi16;
__m128i A2_epi16 = _mm_load_si128((__m128i *)A2ptr);
__m128i A1_epi16 = _mm_load_si128((__m128i *)A1ptr);
__m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
__m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
__m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
__m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
__m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
__m128i E1_epi16 = _mm_load_si128((__m128i *)E1ptr);
__m128i E2_epi16 = _mm_load_si128((__m128i *)E2ptr);
A2ptr += 8;
A1ptr += 8;
Aptr += 8;
Bptr += 8;
Cptr += 8;
Dptr += 8;
Eptr += 8;
E1ptr += 8;
E2ptr += 8;
__m128i A2_epi32a = _mm_unpackhi_epi16(zero_epi16, A2_epi16);
__m128i A1_epi32a = _mm_unpackhi_epi16(zero_epi16, A1_epi16);
__m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
__m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
__m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
__m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
__m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
__m128i E1_epi32a = _mm_unpackhi_epi16(zero_epi16, E1_epi16);
__m128i E2_epi32a = _mm_unpackhi_epi16(zero_epi16, E2_epi16);
__m128i A2_epi32b = _mm_unpacklo_epi16(zero_epi16, A2_epi16);
__m128i A1_epi32b = _mm_unpacklo_epi16(zero_epi16, A1_epi16);
__m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
__m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
__m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
__m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
__m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
__m128i E1_epi32b = _mm_unpacklo_epi16(zero_epi16, E1_epi16);
__m128i E2_epi32b = _mm_unpacklo_epi16(zero_epi16, E2_epi16);
__m128 A2aps = _mm_cvtepi32_ps(A2_epi32a);
__m128 A1aps = _mm_cvtepi32_ps(A1_epi32a);
__m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
__m128 Baps = _mm_cvtepi32_ps(B_epi32a);
__m128 Caps = _mm_cvtepi32_ps(C_epi32a);
__m128 Daps = _mm_cvtepi32_ps(D_epi32a);
__m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
__m128 E1aps = _mm_cvtepi32_ps(E1_epi32a);
__m128 E2aps = _mm_cvtepi32_ps(E2_epi32a);
__m128 A2bps = _mm_cvtepi32_ps(A2_epi32b);
__m128 A1bps = _mm_cvtepi32_ps(A1_epi32b);
__m128 Abps = _mm_cvtepi32_ps(A_epi32b);
__m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
__m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
__m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
__m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
__m128 E1bps = _mm_cvtepi32_ps(E1_epi32b);
__m128 E2bps = _mm_cvtepi32_ps(E2_epi32b);
A2aps = _mm_mul_ps(A2aps, a);
A1aps = _mm_mul_ps(A1aps, b);
Aaps = _mm_mul_ps(Aaps, b);
Baps = _mm_mul_ps(Baps, b);
Caps = _mm_mul_ps(Caps, b);
Daps = _mm_mul_ps(Daps, b);
Eaps = _mm_mul_ps(Eaps, b);
E1aps = _mm_mul_ps(E1aps, b);
E2aps = _mm_mul_ps(E2aps, a);
A2bps = _mm_mul_ps(A2bps, a);
A1bps = _mm_mul_ps(A1bps, b);
Abps = _mm_mul_ps(Abps, b);
Bbps = _mm_mul_ps(Bbps, b);
Cbps = _mm_mul_ps(Cbps, b);
Dbps = _mm_mul_ps(Dbps, b);
Ebps = _mm_mul_ps(Ebps, b);
E1bps = _mm_mul_ps(E1bps, b);
E2bps = _mm_mul_ps(E2bps, a);
Aaps = _mm_add_ps(Aaps, A2aps);
Aaps = _mm_add_ps(Aaps, A1aps);
Aaps = _mm_add_ps(Aaps, Baps);
Aaps = _mm_add_ps(Aaps, Caps);
Aaps = _mm_add_ps(Aaps, Daps);
Aaps = _mm_add_ps(Aaps, Eaps);
Aaps = _mm_add_ps(Aaps, E1aps);
Aaps = _mm_add_ps(Aaps, E2aps);
Abps = _mm_add_ps(Abps, A2bps);
Abps = _mm_add_ps(Abps, A1bps);
Abps = _mm_add_ps(Abps, Bbps);
Abps = _mm_add_ps(Abps, Cbps);
Abps = _mm_add_ps(Abps, Dbps);
Abps = _mm_add_ps(Abps, Ebps);
Abps = _mm_add_ps(Abps, E1bps);
Abps = _mm_add_ps(Abps, E2bps);
C_epi32a = _mm_cvtps_epi32(Aaps);
C_epi32b = _mm_cvtps_epi32(Abps);
C_epi32a = _mm_srai_epi32(C_epi32a, 16);
C_epi32b = _mm_srai_epi32(C_epi32b, 16);
mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
_mm_storeu_si128((__m128i *)output, mix_epi16);
output += 8;
}
}
break;
case 7:
{
a = _mm_set1_ps(af);
b = _mm_set1_ps(bf);
short *A1ptr = Aptr - (pitch >> 1);
short *E1ptr = Eptr + (pitch >> 1);
zero_epi16 = _mm_set1_epi16(0);
for (i = 0; i < pixels * 4; i += 8)
{
__m128i mix_epi16;
__m128i A1_epi16 = _mm_load_si128((__m128i *)A1ptr);
__m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
__m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
__m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
__m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
__m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
__m128i E1_epi16 = _mm_load_si128((__m128i *)E1ptr);
A1ptr += 8;
Aptr += 8;
Bptr += 8;
Cptr += 8;
Dptr += 8;
Eptr += 8;
E1ptr += 8;
__m128i A1_epi32a = _mm_unpackhi_epi16(zero_epi16, A1_epi16);
__m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
__m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
__m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
__m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
__m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
__m128i E1_epi32a = _mm_unpackhi_epi16(zero_epi16, E1_epi16);
__m128i A1_epi32b = _mm_unpacklo_epi16(zero_epi16, A1_epi16);
__m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
__m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
__m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
__m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
__m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
__m128i E1_epi32b = _mm_unpacklo_epi16(zero_epi16, E1_epi16);
__m128 A1aps = _mm_cvtepi32_ps(A1_epi32a);
__m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
__m128 Baps = _mm_cvtepi32_ps(B_epi32a);
__m128 Caps = _mm_cvtepi32_ps(C_epi32a);
__m128 Daps = _mm_cvtepi32_ps(D_epi32a);
__m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
__m128 E1aps = _mm_cvtepi32_ps(E1_epi32a);
__m128 A1bps = _mm_cvtepi32_ps(A1_epi32b);
__m128 Abps = _mm_cvtepi32_ps(A_epi32b);
__m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
__m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
__m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
__m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
__m128 E1bps = _mm_cvtepi32_ps(E1_epi32b);
A1aps = _mm_mul_ps(A1aps, a);
Aaps = _mm_mul_ps(Aaps, b);
Baps = _mm_mul_ps(Baps, b);
Caps = _mm_mul_ps(Caps, b);
Daps = _mm_mul_ps(Daps, b);
Eaps = _mm_mul_ps(Eaps, b);
E1aps = _mm_mul_ps(E1aps, a);
A1bps = _mm_mul_ps(A1bps, a);
Abps = _mm_mul_ps(Abps, b);
Bbps = _mm_mul_ps(Bbps, b);
Cbps = _mm_mul_ps(Cbps, b);
Dbps = _mm_mul_ps(Dbps, b);
Ebps = _mm_mul_ps(Ebps, b);
E1bps = _mm_mul_ps(E1bps, a);
Aaps = _mm_add_ps(Aaps, A1aps);
Aaps = _mm_add_ps(Aaps, Baps);
Aaps = _mm_add_ps(Aaps, Caps);
Aaps = _mm_add_ps(Aaps, Daps);
Aaps = _mm_add_ps(Aaps, Eaps);
Aaps = _mm_add_ps(Aaps, E1aps);
Abps = _mm_add_ps(Abps, A1bps);
Abps = _mm_add_ps(Abps, Bbps);
Abps = _mm_add_ps(Abps, Cbps);
Abps = _mm_add_ps(Abps, Dbps);
Abps = _mm_add_ps(Abps, Ebps);
Abps = _mm_add_ps(Abps, E1bps);
C_epi32a = _mm_cvtps_epi32(Aaps);
C_epi32b = _mm_cvtps_epi32(Abps);
C_epi32a = _mm_srai_epi32(C_epi32a, 16);
C_epi32b = _mm_srai_epi32(C_epi32b, 16);
mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
_mm_storeu_si128((__m128i *)output, mix_epi16);
output += 8;
}
}
break;
case 5:
{
a = _mm_set1_ps(af);
b = _mm_set1_ps(bf);
c = _mm_set1_ps(cf);
zero_epi16 = _mm_set1_epi16(0);
for (i = 0; i < pixels * 4; i += 8)
{
__m128i mix_epi16;
__m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
__m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
__m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
__m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
__m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
Aptr += 8;
Bptr += 8;
Cptr += 8;
Dptr += 8;
Eptr += 8;
__m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
__m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
__m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
__m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
__m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
__m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
__m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
__m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
__m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
__m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
__m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
__m128 Baps = _mm_cvtepi32_ps(B_epi32a);
__m128 Caps = _mm_cvtepi32_ps(C_epi32a);
__m128 Daps = _mm_cvtepi32_ps(D_epi32a);
__m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
__m128 Abps = _mm_cvtepi32_ps(A_epi32b);
__m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
__m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
__m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
__m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
Aaps = _mm_mul_ps(Aaps, a);
Baps = _mm_mul_ps(Baps, b);
Caps = _mm_mul_ps(Caps, c);
Daps = _mm_mul_ps(Daps, b);
Eaps = _mm_mul_ps(Eaps, a);
Abps = _mm_mul_ps(Abps, a);
Bbps = _mm_mul_ps(Bbps, b);
Cbps = _mm_mul_ps(Cbps, c);
Dbps = _mm_mul_ps(Dbps, b);
Ebps = _mm_mul_ps(Ebps, a);
Aaps = _mm_add_ps(Aaps, Baps);
Aaps = _mm_add_ps(Aaps, Caps);
Aaps = _mm_add_ps(Aaps, Daps);
Aaps = _mm_add_ps(Aaps, Eaps);
Abps = _mm_add_ps(Abps, Bbps);
Abps = _mm_add_ps(Abps, Cbps);
Abps = _mm_add_ps(Abps, Dbps);
Abps = _mm_add_ps(Abps, Ebps);
C_epi32a = _mm_cvtps_epi32(Aaps);
C_epi32b = _mm_cvtps_epi32(Abps);
C_epi32a = _mm_srai_epi32(C_epi32a, 16);
C_epi32b = _mm_srai_epi32(C_epi32b, 16);
mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
_mm_storeu_si128((__m128i *)output, mix_epi16);
output += 8;
}
}
break;
}
}
else
{
if (taps == 5)
{
switch ((int)(sharpness * 5.0))
{
case 0:
default:
af = 0.000f * adiff - 0.0625f * diff;
bf = 0.000f * adiff + 0.2500f * diff;
cf = 1.000f * adiff + 0.6250f * diff;
break;
case 1: //small sharpen
//a = _mm_set1_ps(-0.0625);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(0.625);
af = -0.0625f * adiff - 0.1250f * diff;
bf = 0.2500f * adiff + 0.2500f * diff;
cf = 0.6250f * adiff + 0.7500f * diff;
break;
case 2: //nice sharpen
//a = _mm_set1_ps(-0.125);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(0.75);
af = -0.1250f * adiff - 0.2500f * diff;
bf = 0.2500f * adiff + 0.2500f * diff;
cf = 0.7500f * adiff + 1.0000f * diff;
break;
case 3: //higher
//a = _mm_set1_ps(-0.25);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(1.0);
af = -0.2500f * adiff - 0.5000f * diff;
bf = 0.2500f * adiff + 0.5000f * diff;
cf = 1.0000f * adiff + 1.0000f * diff;
break;
case 4: //overkill sharpen
//a = _mm_set1_ps(-0.5);
//b = _mm_set1_ps(0.5);
//c = _mm_set1_ps(1.0);
af = -0.5000f * adiff - 0.5000f * diff;
bf = 0.5000f * adiff + 0.0000f * diff;
cf = 1.0000f * adiff + 2.0000f * diff;
break;
case 5: //highest sharpen
//a = _mm_set1_ps(-0.5);
//b = _mm_set1_ps(0.0);
//c = _mm_set1_ps(2.0);
af = -0.5000f;
bf = 0.0000f;
cf = 2.0000f;
break;
}
a = _mm_set1_ps(af);
b = _mm_set1_ps(bf);
c = _mm_set1_ps(cf);
zero_epi16 = _mm_set1_epi16(0);
/*Bset = _mm_set1_epi16(B);
Cset = _mm_set1_epi16(C);
shiftsse2 = shift - prescale;
if(preshift)
{
Bset = _mm_srai_epi16(Bset, preshift);
Cset = _mm_srai_epi16(Cset, preshift);
shiftsse2 -= preshift;
}
*/
for (i = 0; i < pixels * 4; i += 8)
{
__m128i mix_epi16;
__m128i tmp_epi16;
__m128i A_epi16 = _mm_load_si128((__m128i *)Aptr);
__m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
__m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
__m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
__m128i E_epi16 = _mm_load_si128((__m128i *)Eptr);
tmp_epi16 = C_epi16;
Aptr += 8;
Bptr += 8;
Cptr += 8;
Dptr += 8;
Eptr += 8;
__m128i A_epi32a = _mm_unpackhi_epi16(zero_epi16, A_epi16);
__m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
__m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
__m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
__m128i E_epi32a = _mm_unpackhi_epi16(zero_epi16, E_epi16);
__m128i A_epi32b = _mm_unpacklo_epi16(zero_epi16, A_epi16);
__m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
__m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
__m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
__m128i E_epi32b = _mm_unpacklo_epi16(zero_epi16, E_epi16);
__m128 Aaps = _mm_cvtepi32_ps(A_epi32a);
__m128 Baps = _mm_cvtepi32_ps(B_epi32a);
__m128 Caps = _mm_cvtepi32_ps(C_epi32a);
__m128 Daps = _mm_cvtepi32_ps(D_epi32a);
__m128 Eaps = _mm_cvtepi32_ps(E_epi32a);
__m128 Abps = _mm_cvtepi32_ps(A_epi32b);
__m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
__m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
__m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
__m128 Ebps = _mm_cvtepi32_ps(E_epi32b);
Aaps = _mm_mul_ps(Aaps, a);
Baps = _mm_mul_ps(Baps, b);
Caps = _mm_mul_ps(Caps, c);
Daps = _mm_mul_ps(Daps, b);
Eaps = _mm_mul_ps(Eaps, a);
Abps = _mm_mul_ps(Abps, a);
Bbps = _mm_mul_ps(Bbps, b);
Cbps = _mm_mul_ps(Cbps, c);
Dbps = _mm_mul_ps(Dbps, b);
Ebps = _mm_mul_ps(Ebps, a);
Aaps = _mm_add_ps(Aaps, Baps);
Aaps = _mm_add_ps(Aaps, Caps);
Aaps = _mm_add_ps(Aaps, Daps);
Aaps = _mm_add_ps(Aaps, Eaps);
Abps = _mm_add_ps(Abps, Bbps);
Abps = _mm_add_ps(Abps, Cbps);
Abps = _mm_add_ps(Abps, Dbps);
Abps = _mm_add_ps(Abps, Ebps);
C_epi32a = _mm_cvtps_epi32(Aaps);
C_epi32b = _mm_cvtps_epi32(Abps);
C_epi32a = _mm_srai_epi32(C_epi32a, 16);
C_epi32b = _mm_srai_epi32(C_epi32b, 16);
mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
tmp_epi16 = _mm_and_si128(tmp_epi16, maskA_epi16);
mix_epi16 = _mm_and_si128(mix_epi16, maskRGB_epi16);
mix_epi16 = _mm_add_epi16 (mix_epi16, tmp_epi16);
_mm_storeu_si128((__m128i *)output, mix_epi16);
output += 8;
}
}
else if (taps == 3)
{
switch ((int)(sharpness * 5.0))
{
case 0:
default:
af = 0.000f * adiff - 0.0625f * diff;
cf = 1.000f * adiff + 1.1250f * diff;
break;
case 1: //small sharpen
//a = _mm_set1_ps(-0.0625);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(0.625);
af = -0.0625f * adiff - 0.1250f * diff;
cf = 1.1250f * adiff + 1.2500f * diff;
break;
case 2: //nice sharpen
//a = _mm_set1_ps(-0.125);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(0.75);
af = -0.1250f * adiff - 0.2500f * diff;
cf = 1.2500f * adiff + 1.5000f * diff;
break;
case 3: //higher
//a = _mm_set1_ps(-0.25);
//b = _mm_set1_ps(0.25);
//c = _mm_set1_ps(1.0);
af = -0.2500f * adiff - 0.5000f * diff;
cf = 1.5000f * adiff + 2.0000f * diff;
break;
case 4: //overkill sharpen
//a = _mm_set1_ps(-0.5);
//b = _mm_set1_ps(0.5);
//c = _mm_set1_ps(1.0);
af = -0.5000f * adiff - 0.5000f * diff;
cf = 2.0000f * adiff + 2.0000f * diff;
break;
case 5: //highest sharpen
//a = _mm_set1_ps(-0.5);
//b = _mm_set1_ps(0.0);
//c = _mm_set1_ps(2.0);
af = -0.5000f;
cf = 2.0000f;
break;
}
a = _mm_set1_ps(af);
c = _mm_set1_ps(cf);
zero_epi16 = _mm_set1_epi16(0);
for (i = 0; i < pixels * 4; i += 8)
{
__m128i mix_epi16;
__m128i B_epi16 = _mm_load_si128((__m128i *)Bptr);
__m128i C_epi16 = _mm_load_si128((__m128i *)Cptr);
__m128i D_epi16 = _mm_load_si128((__m128i *)Dptr);
__m128i tmp_epi16 = C_epi16;
Aptr += 8;
Bptr += 8;
Cptr += 8;
Dptr += 8;
Eptr += 8;
__m128i B_epi32a = _mm_unpackhi_epi16(zero_epi16, B_epi16);
__m128i C_epi32a = _mm_unpackhi_epi16(zero_epi16, C_epi16);
__m128i D_epi32a = _mm_unpackhi_epi16(zero_epi16, D_epi16);
__m128i B_epi32b = _mm_unpacklo_epi16(zero_epi16, B_epi16);
__m128i C_epi32b = _mm_unpacklo_epi16(zero_epi16, C_epi16);
__m128i D_epi32b = _mm_unpacklo_epi16(zero_epi16, D_epi16);
__m128 Baps = _mm_cvtepi32_ps(B_epi32a);
__m128 Caps = _mm_cvtepi32_ps(C_epi32a);
__m128 Daps = _mm_cvtepi32_ps(D_epi32a);
__m128 Bbps = _mm_cvtepi32_ps(B_epi32b);
__m128 Cbps = _mm_cvtepi32_ps(C_epi32b);
__m128 Dbps = _mm_cvtepi32_ps(D_epi32b);
Baps = _mm_mul_ps(Baps, a);
Caps = _mm_mul_ps(Caps, c);
Daps = _mm_mul_ps(Daps, a);
Bbps = _mm_mul_ps(Bbps, a);
Cbps = _mm_mul_ps(Cbps, c);
Dbps = _mm_mul_ps(Dbps, a);
Baps = _mm_add_ps(Baps, Caps);
Baps = _mm_add_ps(Baps, Daps);
Bbps = _mm_add_ps(Bbps, Cbps);
Bbps = _mm_add_ps(Bbps, Dbps);
C_epi32a = _mm_cvtps_epi32(Baps);
C_epi32b = _mm_cvtps_epi32(Bbps);
C_epi32a = _mm_srai_epi32(C_epi32a, 16);
C_epi32b = _mm_srai_epi32(C_epi32b, 16);
mix_epi16 = _mm_packs_epi32(C_epi32b, C_epi32a);
tmp_epi16 = _mm_and_si128(tmp_epi16, maskA_epi16);
mix_epi16 = _mm_and_si128(mix_epi16, maskRGB_epi16);
mix_epi16 = _mm_add_epi16 (mix_epi16, tmp_epi16);
_mm_storeu_si128((__m128i *)output, mix_epi16);
output += 8;
}
}
else
{
memcpy(output, Cptr, pixels * 3 * 2);
}
}
}
/* In-place 1-2-1 horizontal blur of one row of interleaved 16-bit triples.
   The first and last pixels pass through unchanged; every interior pixel
   channel becomes (left + 2*center + right) >> 2 (rounded down).

   Works without a scratch buffer: results are first written one pixel to
   the LEFT of their source (so no source sample is overwritten before it
   is consumed), then the whole row is shifted right by one pixel and the
   saved first pixel is restored.

   width - number of pixels in the row (must be >= 2)
   sptr  - pointer to width*3 unsigned shorts (R,G,B interleaved)

   Fix: the shift-back loop previously ran `for (i = 2; i < width; i++)`,
   one iteration short, which left pixel 0 holding blur(pixel 1) and wrote
   the saved original pixel 0 into pixel 1 (first two pixels effectively
   swapped). It now runs width-1 iterations so the restored first pixel
   lands at position 0. */
void FastBlurHinplace(int width, unsigned short *sptr)
{
    unsigned short *outptr = sptr;
    int i = 0;
    int rneg1;
    int gneg1;
    int bneg1;
    // Save the first pixel; it is emitted unchanged at the very end.
    rneg1 = *sptr++; //R
    gneg1 = *sptr++; //G
    bneg1 = *sptr++; //B
    // Blur interior pixels 1..width-2, writing each result one pixel to
    // the left of its source (outptr lags sptr by one pixel).
    for (i = 1; i < width - 1; i++)
    {
        *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
        sptr++; //R
        *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
        sptr++; //G
        *outptr++ = ((sptr[-3] + sptr[0] * 2 + sptr[3]) >> 2);
        sptr++; //B
    }
    // Last pixel passes through unchanged (currently parked at position
    // width-2; the shift below moves it into place).
    *outptr++ = *sptr++; //R
    *outptr++ = *sptr++; //G
    *outptr++ = *sptr++; //B
    outptr += 2; // point at the final component of the row
    // Shift the row right by one pixel, walking from the end so no value
    // is clobbered before it has been moved.  Indexed form (not *outptr--
    // chains) avoids the unsequenced-modification UB GCC warned about.
    // width-1 iterations are required so that position 0 is also vacated.
    for (i = 1; i < width; i++)
    {
        outptr[0] = outptr[-3];
        outptr[-1] = outptr[-4];
        outptr[-2] = outptr[-5];
        outptr -= 3;
    }
    // Restore the saved (unfiltered) first pixel at positions 2,1,0.
    *outptr-- = bneg1;
    *outptr-- = gneg1;
    *outptr-- = rneg1;
}
/* Demosaic one row pair (even row `line` and the following odd row) of a
   Bayer frame whose even rows are R,G,R,G,... and odd rows are G,B,G,B,...
   ("RED_GRN" order), writing interpolated values into the grn/red/blu
   output pointers.

   width, height - Bayer frame dimensions in samples.
   line          - index of the even row to process (pair: line, line+1).
   pixelstride   - element stride between successive output pixels in
                   grn/red/blu (the three outputs advance in lockstep).
   grn/red/blu   - output component pointers for THIS row pair; rgboffset
                   starts at 0, not at row*width*pixelstride.
   basebayer     - raw Bayer samples for the whole frame.
   highquality   - nonzero selects the 5x5 kernels (REDCELL/GRNREDCELL/
                   BLUCELL/GRNBLUCELL, defined elsewhere) when DEBAYER5x5
                   is compiled in; otherwise simple bilinear averaging.
   sharpening    - 0 = horizontal blur, 1..3 = blur+sharpen, else nothing.

   Missing components are bilinearly averaged from the nearest same-color
   neighbors; the "+1" before each ">>1"/">>2" rounds the average.  Edge
   rows/columns fall back to one-sided averages of the neighbors that
   exist.  NOTE(review): the *CELL macros take only the red-plane pointer;
   presumably they derive grn/blu via fixed offsets from it — confirm
   against the macro definitions. */
void DoDEBAYER_ORDER_RED_GRN(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    //omp_set_dynamic(64);
    //omp_set_num_threads(6);
    //#pragma omp parallel for
    //for (row = 0; row < height; row+=2)
    // This function now processes a single row pair per call; the old
    // whole-frame OpenMP loop above is retained for reference only.
    row = line;
    {
        int x;
        int offset = row * width;
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int even_done = 0, odd_done = 0;
        //even rows (R,G,R,G,...)
#if DEBAYER5x5
        if (highquality)
        {
            // 5x5 high-quality path; needs two rows of margin, so the
            // first/last row pairs fall through to the bilinear path.
            if (row > 0 && row < height - 2)
            {
                // First two columns: bilinear (no left margin for 5x5).
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                // Interior columns: 5x5 kernels.
                for (x = 2; x < width - 2; x += 2)
                {
                    /*red cell*/
                    REDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /*grn cell*/
                    GRNREDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                // Last two columns: bilinear again.
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                offset++, rgboffset += pixelstride;
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                even_done = 1;
            }
        }
#endif
        if (!even_done)
        {
            // Bilinear even row (also the fallback when 5x5 margins are
            // unavailable or highquality is off).
            if (row > 0)
            {
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
            }
            else // first row: no row above, use the row below only
            {
                /*red cell*/
                grn[rgboffset] = (basebayer[offset + 1] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset + width + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset + width];
                    offset++, rgboffset += pixelstride;
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset + width - 1] + basebayer[offset + width + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = basebayer[offset + width];
                offset++, rgboffset += pixelstride;
            }
        }
        //odd rows (G,B,G,B,...)
#if DEBAYER5x5
        if (highquality)
        {
            if (oddrow > 1 && oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                /* blu */
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*grn*/
                    GRNBLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /* blu */
                    BLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /* blu */
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                odd_done = 1;
            }
        }
#endif
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /* blu */
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /* blu */
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
            else // last row: no row below, use the row above only
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - width];
                blu[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /* blu */
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset - width];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /* blu */
                grn[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset - width - 1];
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
        }
        {
            // Post-filter both output rows horizontally, in place.
            // NOTE(review): only &red[...] is passed; this assumes the
            // three output planes are interleaved (pixelstride-spaced) so
            // one call covers R,G,B of a whole row — confirm with callers.
            switch (sharpening)
            {
            case 0: // just blur
                FastBlurHinplace(width, &red[0]);
                FastBlurHinplace(width, &red[width * pixelstride]);
                break;
            case 1: // blur/sharpen
            case 2: // blur/sharpen
            case 3: // blur/sharpen
                FastSharpeningBlurHinplace(width, &red[0], sharpening);
                FastSharpeningBlurHinplace(width, &red[width * pixelstride], sharpening);
                break;
            default:// do nothing
                break;
            }
        }
    }
}
/* Demosaic one row pair (even row `line` and the following odd row) of a
   Bayer frame whose even rows are G,B,G,B,... and odd rows are R,G,R,G,...
   ("GRN_BLU" order).  See DoDEBAYER_ORDER_RED_GRN for the shared parameter
   contract; only the cell layout differs.

   highquality + DEBAYER5x5 selects the 5x5 kernels (GRNBLUCELL/BLUCELL on
   even rows, REDCELL/GRNREDCELL on odd rows) for interior pixels;
   everything else uses rounded bilinear averages, with one-sided averages
   at frame edges.  NOTE(review): the *CELL macros take only the red-plane
   pointer; presumably grn/blu are reached via fixed offsets from it —
   confirm against the macro definitions. */
void DoDEBAYER_ORDER_GRN_BLU(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    //omp_set_dynamic(64);
    //omp_set_num_threads(6);
    //#pragma omp parallel for
    //for (row = 0; row < height; row+=2)
    // Single row pair per call; the old whole-frame OpenMP loop above is
    // kept only for reference.
    row = line;
    {
        int offset = row * width;
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int x, even_done = 0, odd_done = 0;
        //even rows (G,B,G,B,...)
#if DEBAYER5x5
        if (highquality)
        {
            // 5x5 path needs two rows of margin; edge pairs fall through
            // to the bilinear path below.
            if (row > 0 && row < height - 2)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                /* blu */
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*grn cell*/
                    GRNBLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /* blu */
                    BLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*blu*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                even_done = 1;
            }
        }
#endif
        if (!even_done)
        {
            // Bilinear even row.
            if (row > 0)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /* blu */
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*blu*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
            else // first row: no row above, use the row below only
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset]; //g
                blu[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset + width];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*blu*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset + width - 1] + basebayer[offset + width + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset + width];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*blu*/
                grn[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset + width - 1];
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
        }
        //odd rows (R,G,R,G,...)
#if DEBAYER5x5
        if (highquality)
        {
            if (oddrow > 1 && oddrow < height - 1)
            {
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*red cell*/
                    REDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    GRNREDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                offset++, rgboffset += pixelstride;
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                odd_done = 1;
            }
        }
#endif
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
            }
            else // last row: no row below, use the row above only
            {
                /*red cell*/
                grn[rgboffset] = basebayer[offset - width];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - width + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset - width];
                    offset++, rgboffset += pixelstride;
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = basebayer[offset - width];
                offset++, rgboffset += pixelstride;
            }
        }
        {
            // Post-filter both output rows horizontally, in place.
            // NOTE(review): only &red[...] is passed; this assumes the
            // three output planes are interleaved (pixelstride-spaced) so
            // one call covers R,G,B of a whole row — confirm with callers.
            switch (sharpening)
            {
            case 0: // just blur
                FastBlurHinplace(width, &red[0]);
                FastBlurHinplace(width, &red[width * pixelstride]);
                break;
            case 1: // blur/sharpen
            case 2: // blur/sharpen
            case 3: // blur/sharpen
                FastSharpeningBlurHinplace(width, &red[0], sharpening);
                FastSharpeningBlurHinplace(width, &red[width * pixelstride], sharpening);
                break;
            default:// do nothing
                break;
            }
        }
    }
}
/* Demosaic one row pair (even row `line` and the following odd row) of a
   Bayer frame whose even rows are G,R,G,R,... and odd rows are B,G,B,G,...
   ("GRN_RED" order).  See DoDEBAYER_ORDER_RED_GRN for the shared parameter
   contract; only the cell layout differs.

   highquality + DEBAYER5x5 selects the 5x5 kernels (GRNREDCELL/REDCELL on
   even rows, BLUCELL/GRNBLUCELL on odd rows) for interior pixels;
   everything else uses rounded bilinear averages, with one-sided averages
   at frame edges.  NOTE(review): the *CELL macros take only the red-plane
   pointer; presumably grn/blu are reached via fixed offsets from it —
   confirm against the macro definitions. */
void DoDEBAYER_ORDER_GRN_RED(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    //omp_set_dynamic(64);
    //omp_set_num_threads(6);
    //#pragma omp parallel for
    //for (row = 0; row < height; row+=2)
    // Single row pair per call; the old whole-frame OpenMP loop above is
    // kept only for reference.
    row = line;
    {
        int offset = row * width;
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int x, even_done = 0, odd_done = 0;
        //even rows (G,R,G,R,...)
#if DEBAYER5x5
        if (highquality)
        {
            // 5x5 path needs two rows of margin; edge pairs fall through
            // to the bilinear path below.
            if (row > 0 && row < height - 2)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = (basebayer[offset + width] + basebayer[offset - width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*red cell*/
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*grn cell*/
                    GRNREDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /*red cell*/
                    REDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*red cell*/
                grn[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                even_done = 1;
            }
        }
#endif
        if (!even_done)
        {
            // Bilinear even row.
            if (row > 0)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = (basebayer[offset + width] + basebayer[offset - width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    offset++, rgboffset += pixelstride;
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*red cell*/
                grn[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
            }
            else // first row: no row above, use the row below only
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset]; //g
                red[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = basebayer[offset + width];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*red*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset + width - 1] + basebayer[offset + width + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset + width];
                    offset++, rgboffset += pixelstride;
                }
                /*red*/
                grn[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset + width - 1];
                offset++, rgboffset += pixelstride;
            }
        }
        //odd rows (B,G,B,G,...)
#if DEBAYER5x5
        if (highquality)
        {
            if (oddrow > 1 && oddrow < height - 1)
            {
                /*blu*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*blu*/
                    BLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    GRNBLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*blu*/
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset - 1];
                offset++, rgboffset += pixelstride;
                odd_done = 1;
            }
        }
#endif
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*blu*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*blu*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset - 1];
                offset++, rgboffset += pixelstride;
            }
            else // last row: no row below, use the row above only
            {
                /*blu*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + 1] + 1) >> 1;
                red[rgboffset] = basebayer[offset - width + 1];
                blu[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset - width];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*blu*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - width];
                blu[rgboffset] = basebayer[offset - 1];
                offset++, rgboffset += pixelstride;
            }
        }
        {
            // Post-filter both output rows horizontally, in place.
            // NOTE(review): only &red[...] is passed; this assumes the
            // three output planes are interleaved (pixelstride-spaced) so
            // one call covers R,G,B of a whole row — confirm with callers.
            switch (sharpening)
            {
            case 0: // just blur
                FastBlurHinplace(width, &red[0]);
                FastBlurHinplace(width, &red[width * pixelstride]);
                break;
            case 1: // blur/sharpen
            case 2: // blur/sharpen
            case 3: // blur/sharpen
                FastSharpeningBlurHinplace(width, &red[0], sharpening);
                FastSharpeningBlurHinplace(width, &red[width * pixelstride], sharpening);
                break;
            default:// do nothing
                break;
            }
        }
    }
}
/*
 * DoDEBAYER_ORDER_BLU_GRN
 *
 * Demosaic one pair of source rows of a BLU_GRN-ordered Bayer mosaic:
 * row 'line' is B,G,B,G,... and row 'line'+1 is G,R,G,R,...  The two rows
 * are written as two interleaved RGB output rows addressed through the
 * grn/red/blu channel base pointers, each advanced by 'pixelstride' per
 * output pixel.
 *
 *   width, height - Bayer image dimensions in pixels
 *   line          - even source row to process (line+1 is produced too)
 *   pixelstride   - shorts between consecutive output pixels (3 = packed RGB)
 *   basebayer     - base of the Bayer source plane
 *   highquality   - non-zero selects the 5x5 cell macros for interior pixels
 *                   (active only when compiled with DEBAYER5x5)
 *   sharpening    - 0: horizontal blur, 1-3: blur + sharpen strength,
 *                   any other value: no post filter
 *
 * Interior pixels average the immediate neighbors of each missing channel
 * (rounded by adding 1 or 2 before the shift); first/last rows and columns
 * fall back to one-sided neighbors so no read leaves the image.
 */
void DoDEBAYER_ORDER_BLU_GRN(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    //omp_set_dynamic(64);
    //omp_set_num_threads(6);
    //#pragma omp parallel for
    //for (row = 0; row < height; row+=2)
    row = line;  // one line pair per call (the parallel row loop above was retired)
    {
        int x;
        int offset = row * width;          // index of the first Bayer sample of this row
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int even_done = 0, odd_done = 0;
        //even rows (B,G,B,G... pattern)
#if DEBAYER5x5
        if (highquality)
        {
            // 5x5 high-quality path; needs two rows of margin below.
            if (row > 0 && row < height - 2)
            {
                /*b cell*/
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                // Interior columns use the 5x5 cell macros.
                for (x = 2; x < width - 2; x += 2)
                {
                    /*b cell*/
                    BLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /*grn cell*/
                    GRNBLUCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                // Last two columns: bilinear fallback (no right margin for 5x5).
                /*b cell*/
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                offset++, rgboffset += pixelstride;
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                even_done = 1;
            }
        }
#endif
        if (!even_done)
        {
            // Bilinear path for the even row.
            if (row > 0)
            {
                /*b cell*/ // first column: no left neighbor, use vertical/diagonal-right only
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                    /*b cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    blu[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/ // last column: no right neighbor
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                offset++, rgboffset += pixelstride;
            }
            else // first row: no row above, read only the row below
            {
                /*b cell*/
                grn[rgboffset] = (basebayer[offset + 1] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + width + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset + width];
                    offset++, rgboffset += pixelstride;
                    /*b cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset + width - 1] + basebayer[offset + width + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - 1];
                red[rgboffset] = basebayer[offset + width];
                offset++, rgboffset += pixelstride;
            }
        }
        //odd rows (G,R,G,R... pattern); offset/rgboffset carry on from the even row
#if DEBAYER5x5
        if (highquality)
        {
            if (oddrow > 1 && oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                /* r */
                grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                red[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                for (x = 2; x < width - 2; x += 2)
                {
                    /*grn*/
                    GRNREDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                    /* r */
                    REDCELL(&red[rgboffset], &basebayer[offset], width);
                    offset++, rgboffset += pixelstride;
                }
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                offset++, rgboffset += pixelstride;
                /* r */
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
                odd_done = 1;
            }
        }
#endif
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /* r */
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + basebayer[offset - width] + basebayer[offset + width] + 2) >> 2;
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + basebayer[offset + width - 1] + basebayer[offset + width + 1] + 2) >> 2;
                    red[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /* r */
                grn[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset + width - 1] + 1) >> 1;
                red[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
            else // last row: no row below, read only the row above
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - width];
                red[rgboffset] = basebayer[offset + 1];
                offset++, rgboffset += pixelstride;
                for (x = 1; x < width - 1; x += 2)
                {
                    /* r */
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - width - 1] + basebayer[offset - width + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    offset++, rgboffset += pixelstride;
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = basebayer[offset - width];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset++, rgboffset += pixelstride;
                }
                /* r */
                grn[rgboffset] = basebayer[offset - 1];
                blu[rgboffset] = basebayer[offset - width - 1];
                red[rgboffset] = basebayer[offset];
                offset++, rgboffset += pixelstride;
            }
        }
        {
            // Post filter applied to both finished output rows.
            // NOTE(review): the filter is passed &red[...]; presumably it walks
            // the interleaved pixels internally -- confirm against
            // FastBlurHinplace / FastSharpeningBlurHinplace.
            switch (sharpening)
            {
                case 0: // just blur
                    FastBlurHinplace(width, &red[0]);
                    FastBlurHinplace(width, &red[width * pixelstride]);
                    break;
                case 1: // blur/sharpen
                case 2: // blur/sharpen
                case 3: // blur/sharpen
                    FastSharpeningBlurHinplace(width, &red[0], sharpening);
                    FastSharpeningBlurHinplace(width, &red[width * pixelstride], sharpening);
                    break;
                default:// do nothing
                    break;
            }
        }
    }
}
// Debayer one line pair of a frame into interleaved RGB (row pitch is in bytes)
// Demosaic a single line pair of a Bayer frame into interleaved RGB,
// dispatching to the worker that matches the sensor's Bayer phase.
// Null inputs are rejected (asserted in debug builds, ignored in release).
void DebayerLine(int width, int height, int linenum,
                 unsigned short *bayer_source,
                 DEBAYER_ORDERING order,
                 unsigned short *RGB_output,
                 int highquality,
                 int sharpening)
{
    assert(bayer_source != NULL);
    if (bayer_source == NULL) return;
    assert(RGB_output != NULL);
    if (RGB_output == NULL) return;

    const int pixelstride = 3;               // packed RGB output
    unsigned short *basebayer = bayer_source;

    // Channel views into the same interleaved buffer: each plane pointer is
    // offset by its channel slot and stepped by pixelstride per pixel.
    unsigned short *red = &RGB_output[0];
    unsigned short *grn = &RGB_output[1];
    unsigned short *blu = &RGB_output[2];

    switch (order)
    {
        case BAYER_FORMAT_RED_GRN:
            DoDEBAYER_ORDER_RED_GRN(width, height, linenum, pixelstride,
                                    grn, red, blu, basebayer,
                                    highquality, sharpening);
            break;
        case BAYER_FORMAT_GRN_BLU:
            DoDEBAYER_ORDER_GRN_BLU(width, height, linenum, pixelstride,
                                    grn, red, blu, basebayer,
                                    highquality, sharpening);
            break;
        case BAYER_FORMAT_GRN_RED:
            DoDEBAYER_ORDER_GRN_RED(width, height, linenum, pixelstride,
                                    grn, red, blu, basebayer,
                                    highquality, sharpening);
            break;
        case BAYER_FORMAT_BLU_GRN:
            DoDEBAYER_ORDER_BLU_GRN(width, height, linenum, pixelstride,
                                    grn, red, blu, basebayer,
                                    highquality, sharpening);
            break;
    }
}
/*
 * DoVertical_DEBAYER_ORDER_RED_GRN
 *
 * Half-horizontal-resolution demosaic of one row pair of a RED_GRN-ordered
 * Bayer mosaic (even rows R,G,R,G..., odd rows G,B,G,B...).  The Bayer
 * column index advances by 2 per output pixel, so each output row holds
 * width/2 pixels; only vertical interpolation is done at full quality.
 * grn/red/blu are channel base pointers into the interleaved output,
 * each stepped by 'pixelstride'.  'highquality' is accepted for signature
 * parity with the full-resolution workers but unused here.
 */
void DoVertical_DEBAYER_ORDER_RED_GRN(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    row = line;
    {
        int x;
        int offset = row * width;          // first Bayer sample of this row
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int even_done = 0, odd_done = 0;
        //even rows (red cells kept, one output pixel per 2 Bayer columns)
        if (!even_done)
        {
            if (row > 0)
            {
                /*red cell*/ // first column: green from right neighbor only
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + basebayer[offset - width - 1] + basebayer[offset + width - 1] + 2) >> 2;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // first row: no row above
            {
                /*red cell*/
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset + width + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = basebayer[offset + width + 1];
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        // Skip to the start of the second output row.  rgboffset is already
        // pixelstride*(width/2) here, so this lands the odd row at
        // red[width*pixelstride]: the output row pitch stays the full width.
        rgboffset += pixelstride * (width / 2);
        if (!odd_done)
        {
            //odd rows (green cells kept)
            if (oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // last row: no row below
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset - width];
                blu[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset - width];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        {
            // Post filter both half-width output rows.
            switch (sharpening)
            {
                case 0: // just blur
                    FastBlurHinplace(width / 2, &red[0]);
                    FastBlurHinplace(width / 2, &red[width * pixelstride]);
                    break;
                case 1: // blur/sharpen
                case 2: // blur/sharpen
                case 3: // blur/sharpen
                    FastSharpeningBlurHinplace(width / 2, &red[0], sharpening);
                    FastSharpeningBlurHinplace(width / 2, &red[width * pixelstride], sharpening);
                    break;
                default:// do nothing
                    break;
            }
        }
    }
}
/*
 * DoVertical_DEBAYER_ORDER_GRN_BLU
 *
 * Half-horizontal-resolution demosaic of one row pair of a GRN_BLU-ordered
 * Bayer mosaic (even rows G,B,G,B..., odd rows R,G,R,G...).  The Bayer
 * column index advances by 2 per output pixel, so each output row holds
 * width/2 pixels.  grn/red/blu are channel base pointers into the
 * interleaved output, stepped by 'pixelstride'.  'highquality' is unused
 * here (kept for signature parity with the full-resolution workers).
 */
void DoVertical_DEBAYER_ORDER_GRN_BLU(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    row = line;
    {
        int offset = row * width;          // first Bayer sample of this row
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int x, even_done = 0, odd_done = 0;
        //even rows (green cells kept)
        if (!even_done)
        {
            if (row > 0)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                blu[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // first row: no row above
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + width];
                blu[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset + width];
                    blu[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        // Jump to the second output row; rgboffset is already
        // pixelstride*(width/2), so the odd row starts at the full-width
        // row pitch red[width*pixelstride].
        rgboffset += pixelstride * (width / 2);
        //odd rows (red cells kept)
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*red cell*/
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + basebayer[offset - width - 1] + basebayer[offset + width - 1] + 2) >> 2;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // last row: no row below
            {
                /*red cell*/
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - width + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*red cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset];
                    blu[rgboffset] = basebayer[offset - width + 1];
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        {
            // Post filter both half-width output rows.
            switch (sharpening)
            {
                case 0: // just blur
                    FastBlurHinplace(width / 2, &red[0]);
                    FastBlurHinplace(width / 2, &red[width * pixelstride]);
                    break;
                case 1: // blur/sharpen
                case 2: // blur/sharpen
                case 3: // blur/sharpen
                    FastSharpeningBlurHinplace(width / 2, &red[0], sharpening);
                    FastSharpeningBlurHinplace(width / 2, &red[width * pixelstride], sharpening);
                    break;
                default:// do nothing
                    break;
            }
        }
    }
}
/*
 * DoVertical_DEBAYER_ORDER_GRN_RED
 *
 * Half-horizontal-resolution demosaic of one row pair of a GRN_RED-ordered
 * Bayer mosaic (even rows G,R,G,R..., odd rows B,G,B,G...).  The Bayer
 * column index advances by 2 per output pixel, so each output row holds
 * width/2 pixels.  grn/red/blu are channel base pointers into the
 * interleaved output, stepped by 'pixelstride'.  'highquality' is unused
 * here (kept for signature parity with the full-resolution workers).
 */
void DoVertical_DEBAYER_ORDER_GRN_RED(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    row = line;
    {
        int offset = row * width;          // first Bayer sample of this row
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int x, even_done = 0, odd_done = 0;
        //even rows (green cells kept)
        if (!even_done)
        {
            if (row > 0)
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = (basebayer[offset + width] + basebayer[offset - width] + 1) >> 1;
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = (basebayer[offset + width] + basebayer[offset - width] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // first row: no row above
            {
                /*grn cell*/
                grn[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = basebayer[offset + width];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn cell*/
                    grn[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset + width];
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        // Jump to the second output row; rgboffset is already
        // pixelstride*(width/2), so the odd row starts at the full-width
        // row pitch red[width*pixelstride].
        rgboffset += pixelstride * (width / 2);
        //odd rows (blue cells kept)
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*blu*/
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                blu[rgboffset] = basebayer[offset];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*blu*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + basebayer[offset - width - 1] + basebayer[offset + width - 1] + 2) >> 2;
                    blu[rgboffset] = basebayer[offset];
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // last row: no row below
            {
                /*blu*/
                grn[rgboffset] = basebayer[offset + 1];
                red[rgboffset] = basebayer[offset - width + 1];
                blu[rgboffset] = basebayer[offset];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*blu*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    red[rgboffset] = basebayer[offset - width + 1];
                    blu[rgboffset] = basebayer[offset];
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        {
            // Post filter both half-width output rows.
            switch (sharpening)
            {
                case 0: // just blur
                    FastBlurHinplace(width / 2, &red[0]);
                    FastBlurHinplace(width / 2, &red[width * pixelstride]);
                    break;
                case 1: // blur/sharpen
                case 2: // blur/sharpen
                case 3: // blur/sharpen
                    FastSharpeningBlurHinplace(width / 2, &red[0], sharpening);
                    FastSharpeningBlurHinplace(width / 2, &red[width * pixelstride], sharpening);
                    break;
                default:// do nothing
                    break;
            }
        }
    }
}
/*
 * DoVertical_DEBAYER_ORDER_BLU_GRN
 *
 * Half-horizontal-resolution demosaic of one row pair of a BLU_GRN-ordered
 * Bayer mosaic (even rows B,G,B,G..., odd rows G,R,G,R...).  The Bayer
 * column index advances by 2 per output pixel, so each output row holds
 * width/2 pixels.  grn/red/blu are channel base pointers into the
 * interleaved output, stepped by 'pixelstride'.  'highquality' is unused
 * here (kept for signature parity with the full-resolution workers).
 */
void DoVertical_DEBAYER_ORDER_BLU_GRN(
    int width,
    int height,
    int line,
    int pixelstride,
    unsigned short *grn,
    unsigned short *red,
    unsigned short *blu,
    unsigned short *basebayer,
    int highquality,
    int sharpening)
{
    int row;
    row = line;
    {
        int x;
        int offset = row * width;          // first Bayer sample of this row
        int rgboffset = 0;//row*width*pixelstride;
        int oddrow = row + 1;
        int even_done = 0, odd_done = 0;
        //even rows (blue cells kept)
        if (!even_done)
        {
            if (row > 0)
            {
                /*b cell*/
                grn[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + 1) >> 1;
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*b cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    red[rgboffset] = (basebayer[offset - width + 1] + basebayer[offset + width + 1] + basebayer[offset - width - 1] + basebayer[offset + width - 1] + 2) >> 2;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // first row: no row above
            {
                /*b cell*/
                grn[rgboffset] = basebayer[offset + 1];
                blu[rgboffset] = basebayer[offset];
                red[rgboffset] = basebayer[offset + width + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*b cell*/
                    grn[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    blu[rgboffset] = basebayer[offset];
                    red[rgboffset] = basebayer[offset + width + 1];
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        // Jump to the second output row; rgboffset is already
        // pixelstride*(width/2), so the odd row starts at the full-width
        // row pitch red[width*pixelstride].
        rgboffset += pixelstride * (width / 2);
        //odd rows (green cells kept)
        if (!odd_done)
        {
            if (oddrow < height - 1)
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                red[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = (basebayer[offset - width] + basebayer[offset + width] + 1) >> 1;
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
            else // last row: no row below
            {
                /*grn*/
                grn[rgboffset] = basebayer[offset];
                blu[rgboffset] = basebayer[offset - width];
                red[rgboffset] = basebayer[offset + 1];
                offset += 2, rgboffset += pixelstride;
                for (x = 2; x < width; x += 2)
                {
                    /*grn*/
                    grn[rgboffset] = basebayer[offset];
                    blu[rgboffset] = basebayer[offset - width];
                    red[rgboffset] = (basebayer[offset - 1] + basebayer[offset + 1] + 1) >> 1;
                    offset += 2, rgboffset += pixelstride;
                }
            }
        }
        {
            // Post filter both half-width output rows.
            switch (sharpening)
            {
                case 0: // just blur
                    FastBlurHinplace(width / 2, &red[0]);
                    FastBlurHinplace(width / 2, &red[width * pixelstride]);
                    break;
                case 1: // blur/sharpen
                case 2: // blur/sharpen
                case 3: // blur/sharpen
                    FastSharpeningBlurHinplace(width / 2, &red[0], sharpening);
                    FastSharpeningBlurHinplace(width / 2, &red[width * pixelstride], sharpening);
                    break;
                default:// do nothing
                    break;
            }
        }
    }
}
// Debayer one line pair at half horizontal resolution into interleaved RGB (row pitch is in bytes)
// Demosaic a single line pair at half horizontal resolution (the vertical-only
// workers emit width/2 pixels per row), dispatching on the Bayer phase.
// Null inputs are rejected (asserted in debug builds, ignored in release).
void VerticalOnlyDebayerLine(int width, int height, int linenum,
                             unsigned short *bayer_source,
                             DEBAYER_ORDERING order,
                             unsigned short *RGB_output,
                             int highquality, int sharpening)
{
    assert(bayer_source != NULL);
    if (bayer_source == NULL) return;
    assert(RGB_output != NULL);
    if (RGB_output == NULL) return;

    const int pixelstride = 3;               // packed RGB output
    unsigned short *basebayer = bayer_source;

    // Channel views into the same interleaved buffer: each plane pointer is
    // offset by its channel slot and stepped by pixelstride per pixel.
    unsigned short *red = &RGB_output[0];
    unsigned short *grn = &RGB_output[1];
    unsigned short *blu = &RGB_output[2];

    switch (order)
    {
        case BAYER_FORMAT_RED_GRN:
            DoVertical_DEBAYER_ORDER_RED_GRN(width, height, linenum, pixelstride,
                                             grn, red, blu, basebayer,
                                             highquality, sharpening);
            break;
        case BAYER_FORMAT_GRN_BLU:
            DoVertical_DEBAYER_ORDER_GRN_BLU(width, height, linenum, pixelstride,
                                             grn, red, blu, basebayer,
                                             highquality, sharpening);
            break;
        case BAYER_FORMAT_GRN_RED:
            DoVertical_DEBAYER_ORDER_GRN_RED(width, height, linenum, pixelstride,
                                             grn, red, blu, basebayer,
                                             highquality, sharpening);
            break;
        case BAYER_FORMAT_BLU_GRN:
            DoVertical_DEBAYER_ORDER_BLU_GRN(width, height, linenum, pixelstride,
                                             grn, red, blu, basebayer,
                                             highquality, sharpening);
            break;
    }
}
/*
 * ColorDifference2Bayer
 *
 * Convert one line of color-difference planes back into two interleaved
 * Bayer rows, in place over 'srcptr'.  The input line holds four planes,
 * each bayer_pitch/4 shorts apart: G (green average), RG (red minus green,
 * offset-binary around mid-scale), BG (blue minus green, offset-binary),
 * and GD (difference between the two green samples, offset-binary).
 * Reconstruction per pixel:
 *     r  = 2*(rg - mid) + g
 *     b  = 2*(bg - mid) + g
 *     g1 = g + gd,  g2 = g - gd      (the two physical green samples)
 * Results are clamped to the unsigned 16-bit range, interleaved into the
 * Bayer phase selected by 'bayer_format', and copied back over the source.
 *
 * The SSE2 path works 8 pixels at a time in 14-bit precision (>>2 on load,
 * <<2 on store) with saturating adds/subtracts; the scalar tail finishes
 * any remainder (width & 7).
 */
void ColorDifference2Bayer(int width,
                           unsigned short *srcptr,
                           int bayer_pitch,
                           int bayer_format)
{
    int x;
    //int i;
    unsigned short *bayerptr, *G, *RG, *BG, *GD, *lineA16, *lineB16;
    // Two output Bayer rows are staged on the stack before the final memcpy.
    // NOTE(review): no bounds check -- bayer_pitch/2 + 2*width shorts must fit
    // in 16384 entries; confirm against the largest supported frame width.
    unsigned short buffer[16384]; // was 8192 - could not handle 4.5K RAW
    lineA16 = buffer;
    lineB16 = lineA16 + bayer_pitch / 2;   // second row starts half a pitch (in shorts) in
    bayerptr = srcptr;
    // The four quarter-pitch planes of the color-difference line.
    G = bayerptr;
    RG = G + bayer_pitch / 4;
    BG = RG + bayer_pitch / 4;
    GD = BG + bayer_pitch / 4;
    __m128i gggggggg, ggggggg1, ggggggg2, rgrgrgrg, bgbgbgbg, gdgdgdgd;
    __m128i rrrrrrrr, bbbbbbbb;
    __m128i mid8192 = _mm_set1_epi16(8192);     // mid-scale in 14-bit precision
    //__m128i mid16384 = _mm_set1_epi16(16384);
    //__m128i mid32768 = _mm_set1_epi16(32768);
    // Adding then saturating-subtracting this constant clamps values into 0..0x3fff.
    __m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff - 0x3fff);
    int sse2width = width & 0xfff8;             // largest multiple of 8 <= width
    x = 0;
    for (; x < sse2width; x += 8) //TODO SSE version
    {
        gggggggg = _mm_loadu_si128((__m128i *)G);
        G += 8;
        rgrgrgrg = _mm_loadu_si128((__m128i *)RG);
        RG += 8;
        bgbgbgbg = _mm_loadu_si128((__m128i *)BG);
        BG += 8;
        gdgdgdgd = _mm_loadu_si128((__m128i *)GD);
        GD += 8;
        gggggggg = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
        rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
        bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
        gdgdgdgd = _mm_srli_epi16(gdgdgdgd, 2);// 14bit unsigned
        gdgdgdgd = _mm_subs_epi16(gdgdgdgd, mid8192);// -8191 to 8191 14bit signed
        rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
        rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
        rrrrrrrr = _mm_adds_epi16(rrrrrrrr, gggggggg); // -16382 to 32767  (r = 2*(rg-mid) + g)
        bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
        bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
        bbbbbbbb = _mm_adds_epi16(bbbbbbbb, gggggggg); // -16382 to 32767  (b = 2*(bg-mid) + g)
        ggggggg1 = _mm_adds_epi16(gggggggg, gdgdgdgd);// -8191 to 8191 14bit signed  (g1 = g + gd)
        ggggggg2 = _mm_subs_epi16(gggggggg, gdgdgdgd);// -8191 to 8191 14bit signed  (g2 = g - gd)
        //limit to 0 to 16383 via saturating add/sub pairs
        rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
        rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
        bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
        bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
        ggggggg1 = _mm_adds_epi16(ggggggg1, overflowprotectRGB_epi16);
        ggggggg1 = _mm_subs_epu16(ggggggg1, overflowprotectRGB_epi16);
        ggggggg2 = _mm_adds_epi16(ggggggg2, overflowprotectRGB_epi16);
        ggggggg2 = _mm_subs_epu16(ggggggg2, overflowprotectRGB_epi16);
        rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
        bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
        ggggggg1 = _mm_slli_epi16(ggggggg1, 2); // restore to 0 to 65535
        ggggggg2 = _mm_slli_epi16(ggggggg2, 2); // restore to 0 to 65535
        // Interleave the channel pairs in the order the Bayer phase expects.
        switch (bayer_format)
        {
            case BAYER_FORMAT_RED_GRN: //Red-grn phase
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpacklo_epi16(rrrrrrrr, ggggggg1));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpackhi_epi16(rrrrrrrr, ggggggg1));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpacklo_epi16(ggggggg2, bbbbbbbb));
                lineB16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpackhi_epi16(ggggggg2, bbbbbbbb));
                lineB16 += 8;
                break;
            case BAYER_FORMAT_GRN_RED:// grn-red
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpacklo_epi16(ggggggg1, rrrrrrrr));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpackhi_epi16(ggggggg1, rrrrrrrr));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpacklo_epi16(bbbbbbbb, ggggggg2));
                lineB16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpackhi_epi16(bbbbbbbb, ggggggg2));
                lineB16 += 8;
                break;
            case BAYER_FORMAT_GRN_BLU:
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpacklo_epi16(ggggggg1, bbbbbbbb));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpackhi_epi16(ggggggg1, bbbbbbbb));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpacklo_epi16(rrrrrrrr, ggggggg2));
                lineB16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpackhi_epi16(rrrrrrrr, ggggggg2));
                lineB16 += 8;
                break;
            case BAYER_FORMAT_BLU_GRN:
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpacklo_epi16(bbbbbbbb, ggggggg1));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineA16, _mm_unpackhi_epi16(bbbbbbbb, ggggggg1));
                lineA16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpacklo_epi16(ggggggg2, rrrrrrrr));
                lineB16 += 8;
                _mm_storeu_si128((__m128i *)lineB16, _mm_unpackhi_epi16(ggggggg2, rrrrrrrr));
                lineB16 += 8;
                break;
        }
    }
    // Scalar tail: same math at full 16-bit precision with explicit clamps.
    for (; x < width; x++)
    {
        int r, g, b, rg, bg, gd, g1, g2;
        g = (*G++);
        rg = (*RG++);
        bg = (*BG++);
        gd = (*GD++) - 32768;
        r = ((rg - 32768) << 1) + g;
        b = ((bg - 32768) << 1) + g;
        g1 = g + gd;
        g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
        //	stats1+=g1;
        //	stats2+=g2;
        //	statsd+=gd;
        if (r < 0) r = 0;
        if (g1 < 0) g1 = 0;
        if (g2 < 0) g2 = 0;
        if (b < 0) b = 0;
        if (r > 0xffff) r = 0xffff;
        if (g1 > 0xffff) g1 = 0xffff;
        if (g2 > 0xffff) g2 = 0xffff;
        if (b > 0xffff) b = 0xffff;
        switch (bayer_format)
        {
            case BAYER_FORMAT_RED_GRN: //Red-grn phase
                *lineA16++ = r;
                *lineA16++ = g1;
                *lineB16++ = g2;
                *lineB16++ = b;
                break;
            case BAYER_FORMAT_GRN_RED:// grn-red
                *lineA16++ = g1;
                *lineA16++ = r;
                *lineB16++ = b;
                *lineB16++ = g2;
                break;
            case BAYER_FORMAT_GRN_BLU:
                *lineA16++ = g1;
                *lineA16++ = b;
                *lineB16++ = r;
                *lineB16++ = g2;
                break;
            case BAYER_FORMAT_BLU_GRN:
                *lineA16++ = b;
                *lineA16++ = g1;
                *lineB16++ = g2;
                *lineB16++ = r;
                break;
        }
    }
    // Overwrite the source line pair with the reconstructed Bayer data.
    memcpy(bayerptr, buffer, bayer_pitch * 2);
}
/*
 * BayerRippleFilter
 *
 * In-place ripple-suppression filter for raw Bayer data.  The pointer is
 * first normalized so that outA16 addresses a blue sample (for red-phase
 * lines it backs up one full line of 'offset' = bayer_pitch/2 shorts).
 * For each interior sample visited (stride 2 across the line), the value g
 * is compared against the min/max of diagonal neighbors one line away; if g
 * sits just outside that [mn,mx] envelope (within 'range') while a wider
 * 5x5 set of same-color neighbors is flat (spread < range), g is blended
 * toward (mn+mx)/2 with a quadratic weight 'alpha'.
 *
 * Corrected samples are tagged by forcing their LSB to 1 (val |= 1), and
 * candidate neighbors with the LSB set are excluded from min/max gathering
 * so a correction never feeds back into later corrections on the same pass.
 *
 * 'srcbase' is the start of the picture; the early-return guard rejects
 * positions whose 5x5 neighborhood would read before the buffer.
 */
void BayerRippleFilter( int width,
                        unsigned short *srcptr,
                        int bayer_pitch,
                        int bayer_format,
                        unsigned short *srcbase)
{
    unsigned char *line = (unsigned char *)srcptr;
    unsigned short *outA16;
    int x, offset = bayer_pitch / 2;   // one Bayer line, in shorts
    outA16 = (unsigned short *)line;
    // If on a red line, move to a blue line
    //Normalize to a blue pixel for the start point
    switch (bayer_format)
    {
        case BAYER_FORMAT_GRN_RED:
            outA16 -= offset;
            break;
        case BAYER_FORMAT_RED_GRN:
            outA16 -= offset;
            outA16 ++; //blue
            break;
        case BAYER_FORMAT_GRN_BLU:
            outA16 ++; //blue
            break;
        case BAYER_FORMAT_BLU_GRN:
            //blue
            break;
    }
    if (&outA16[-2 * offset - 2] < srcbase)
        return; //HACK to make sure we are reading within the picture
    {
        outA16++; //b
        outA16++; //g
        outA16++; //r
        //now point to green
        for (x = 2; x < width - 2; x++)
        {
            int mn, mx, g;
            int range = 8 * 256; //1<<11
            int shift = 11;
            int delta;
            int alpha;
            g = *outA16;
            // lines below do not need to be tested for a corrected value
            mn = mx = outA16[offset + 1];
            if (mn > outA16[offset - 1]) mn = outA16[offset - 1];
            if (mx < outA16[offset - 1]) mx = outA16[offset - 1];
            // Neighbors on the previous line count only if not already
            // corrected (corrected values carry LSB == 1).
            if ((outA16[-offset - 1] & 1) == 0)
            {
                if (mn > outA16[-offset - 1]) mn = outA16[-offset - 1];
                if (mx < outA16[-offset - 1]) mx = outA16[-offset - 1];
            }
            if ((outA16[-offset + 1] & 1) == 0)
            {
                if (mn > outA16[-offset + 1]) mn = outA16[-offset + 1];
                if (mx < outA16[-offset + 1]) mx = outA16[-offset + 1];
            }
            delta = mx - mn;
            // Only treat g as ripple if the diagonals agree (small delta) and
            // g is just outside their envelope.
            if (delta < range && ((mn - range < g && g < mn) || (mx + range > g && g > mx)))
            {
                int gmn, gmx;
                gmn = gmx = g;
                // Widen the check to same-color samples two lines / two
                // columns away; skip already-corrected ones (LSB set).
                if ((outA16[-2 * offset - 2] & 1) == 0)
                {
                    if (gmn > outA16[-2 * offset - 2]) gmn = outA16[-2 * offset - 2];
                    if (gmx < outA16[-2 * offset - 2]) gmx = outA16[-2 * offset - 2];
                }
                if ((outA16[-2 * offset] & 1) == 0)
                {
                    if (gmn > outA16[-2 * offset]) gmn = outA16[-2 * offset];
                    if (gmx < outA16[-2 * offset]) gmx = outA16[-2 * offset];
                }
                if ((outA16[-2 * offset + 2] & 1) == 0)
                {
                    if (gmn > outA16[-2 * offset + 2]) gmn = outA16[-2 * offset + 2];
                    if (gmx < outA16[-2 * offset + 2]) gmx = outA16[-2 * offset + 2];
                }
                if ((outA16[-2] & 1) == 0)
                {
                    if (gmn > outA16[-2]) gmn = outA16[-2];
                    if (gmx < outA16[-2]) gmx = outA16[-2];
                }
                // lines below do not need to be tested for a corrected value
                if (gmn > outA16[2 * offset - 2]) gmn = outA16[2 * offset - 2];
                if (gmx < outA16[2 * offset - 2]) gmx = outA16[2 * offset - 2];
                if (gmn > outA16[2 * offset]) gmn = outA16[2 * offset];
                if (gmx < outA16[2 * offset]) gmx = outA16[2 * offset];
                if (gmn > outA16[2 * offset + 2]) gmn = outA16[2 * offset + 2];
                if (gmx < outA16[2 * offset + 2]) gmx = outA16[2 * offset + 2];
                if (gmn > outA16[2]) gmn = outA16[2];
                if (gmx < outA16[2]) gmx = outA16[2];
                // Only correct if the wider neighborhood is flat too --
                // otherwise this is real detail, not ripple.
                if ((gmx - gmn) < range)
                {
                    alpha = range;//delta;
                    if (g > mx)
                    {
                        alpha *= (g - mx); //max range
                        alpha >>= shift;
                    }
                    else // g < mn
                    {
                        alpha *= (mn - g); //max range
                        alpha >>= shift;
                    }
                    alpha *= alpha;   // quadratic falloff of the blend weight
                    alpha >>= shift;
                    //	avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
                    //	*outA16 = avg; //good
                    //	*outA16 = mn; //spotty
                    // Final gate: reject if the cross-shaped neighbors differ
                    // too much (likely an edge rather than ripple).
                    if ( (abs(outA16[offset] - outA16[-offset]) < range)
                            && ((abs(outA16[1] - outA16[-1]) < range)))
                    {
                        int val = (alpha * g + (range - alpha) * ((mn + mx) >> 1)) >> shift;
                        if (val > 0xffff) val = 0xffff;
                        if (val < 0) val = 0;
                        val |= 1;        // tag as corrected (see LSB tests above)
                        *outA16 = val;
                        //	*outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
                    }
                }
            }
            outA16++; //g
            outA16++; //b
        }
    }
}
/*
 * LoadCube64_3DLUT
 *
 * Return the 3D LUT (float RGB triples, cube edge written to *lutsize)
 * selected by cfhddata->user_look_CRC, using and maintaining the decoder's
 * single-entry LUT cache:
 *   - cache hit on the CRC: return the cached LUT;
 *   - cache miss: free any stale cached LUT first;
 *   - the built-in Protune preview CRC (0x3f6f5788) synthesizes a 32^3 LUT
 *     from a precomputed 1D curve applied per channel;
 *   - otherwise a CFLK-format file is parsed (header + raw float cube,
 *     byte-swapped when the ID indicates opposite endianness).
 *     NOTE(review): 'fp' is never opened in this function, so the file
 *     path is currently dead code here -- confirm whether the fopen call
 *     was intentionally removed.
 * Returns NULL when nothing was loaded.
 *
 * Fixes:
 *   - '#ifdef _MSVC_VER' corrected to '_MSC_VER' (the macro MSVC actually
 *     predefines); the fread_s branch was never compiled before.
 *   - The non-swapped error path now frees the LUT through the allocator
 *     when _ALLOCATOR is set, matching the allocation and the swapped path.
 */
float *LoadCube64_3DLUT(DECODER *decoder, CFHDDATA *cfhddata, int *lutsize)
{
    int size = 0;
    float *LUT = NULL;
    bool useLUT = false;
    CFLook_Header CFLKhdr;
    FILE *fp = NULL;
    int err = 0;
    if (cfhddata->user_look_CRC != 0 && decoder)
    {
        // Cache hit: same look CRC as last time, reuse the cached cube.
        if (cfhddata->user_look_CRC == decoder->LUTcacheCRC && decoder->LUTcache != NULL)
        {
            *lutsize = decoder->LUTcacheSize;
            return decoder->LUTcache;
        }
        else if (decoder->LUTcache != NULL)
        {
            // Different look requested: drop the stale cached LUT.
#if _ALLOCATOR
            Free(decoder->allocator, decoder->LUTcache);
#else
            MEMORY_FREE(decoder->LUTcache);
#endif
            decoder->LUTcache = NULL;
            decoder->LUTcacheCRC = 0;
            decoder->LUTcacheSize = 0;
        }
        if (cfhddata->user_look_CRC == 0x3f6f5788) // Default Protune preview LUT
        {
            *lutsize = size = 32;
            /*	float PreviewLUT[32];
            	for(i=0; i<size; i++)
            	{
            		PreviewLUT[i] = 0.5 * sinf(3.14159265359 * (float)i/(float)(size-1) + 4.71238898038) + 0.5;
            		{
            			char t[100];
            			sprintf(t, "%f", PreviewLUT[i]);
            			OutputDebugString(t);
            		}
            	} */
            // Precomputed S-curve (see generator snippet above), applied
            // independently to each channel of the identity cube.
            float PreviewLUT[32] =
            {
                0.000000f,
                0.002565f,
                0.010235f,
                0.022930f,
                0.040521f,
                0.062827f,
                0.089618f,
                0.120621f,
                0.155517f,
                0.193947f,
                0.235518f,
                0.279803f,
                0.326347f,
                0.374674f,
                0.424286f,
                0.474675f,
                0.525325f,
                0.575714f,
                0.625326f,
                0.673653f,
                0.720197f,
                0.764482f,
                0.806053f,
                0.844483f,
                0.879379f,
                0.910382f,
                0.937173f,
                0.959479f,
                0.977070f,
                0.989765f,
                0.997435f,
                1.000000f
            };
#if _ALLOCATOR
            LUT = (float *)Alloc(decoder->allocator, 4 * size * size * size * 3);
#else
            LUT = (float *)MEMORY_ALLOC(4 * size * size * size * 3);
#endif
            if (LUT)
            {
                int r, g, b;
                float *fptr = LUT;
                for (r = 0; r < size; r++)
                {
                    for (g = 0; g < size; g++)
                    {
                        for (b = 0; b < size; b++)
                        {
                            *fptr++ = PreviewLUT[b];
                            *fptr++ = PreviewLUT[g];
                            *fptr++ = PreviewLUT[r];
                        }
                    }
                }
                decoder->LUTcacheCRC = cfhddata->user_look_CRC;
                decoder->LUTcache = LUT;
                decoder->LUTcacheSize = size;
                return LUT;
            }
        }
        if (err == 0 && fp != NULL)
        {
            int endianswap = 0;
            int validcflook = 0;
            int len = 0;
            // Fixed: the correct MSVC predefined macro is _MSC_VER (not
            // _MSVC_VER), so the secure fread_s variant is actually used
            // when building with Visual C++.
#ifdef _MSC_VER
            len = (int)fread_s(&CFLKhdr, sizeof(CFLook_Header), 1, sizeof(CFLook_Header), fp);
#else
            len = (int)fread(&CFLKhdr, 1, sizeof(CFLook_Header), fp);
#endif
            if (MAKEID('C', 'F', 'L', 'K') == CFLKhdr.CFLK_ID)
            {
                endianswap = true;
                validcflook = true;
            }
            else if (MAKEID_SWAP('C', 'F', 'L', 'K') == CFLKhdr.CFLK_ID)
            {
                validcflook = true;
            }
            if (validcflook && len > 0)
            {
                if (endianswap)
                {
                    *lutsize = size = SwapInt32(CFLKhdr.lutsize);
                    if (size >= 8 && size <= 65)  // sanity-check cube edge before allocating
                    {
#if _ALLOCATOR
                        LUT = (float *)Alloc(decoder->allocator, 4 * size * size * size * 3);
#else
                        LUT = (float *)MEMORY_ALLOC(4 * size * size * size * 3);
#endif
                        if (LUT)
                        {
                            fseek(fp, SwapInt32(CFLKhdr.hdrsize), SEEK_SET);
                            len = (int)fread(LUT, 4, size * size * size * 3, fp);
                            if (len == size * size * size * 3)
                            {
                                // Byte-swap every 32-bit float word in place.
                                unsigned int *uiLUT = (unsigned int *)LUT;
                                for (int i = 0; i < len; i++)
                                {
                                    uiLUT[i] = SwapInt32(uiLUT[i]);
                                }
                                useLUT = true;
                            }
                            else
                            {
#if _ALLOCATOR
                                Free(decoder->allocator, LUT);
#else
                                MEMORY_FREE(LUT);
#endif
                                LUT = NULL;
                            }
                        }
                    }
                }
                else
                {
                    *lutsize = size = CFLKhdr.lutsize;
                    if (size >= 8 && size <= 65)
                    {
#if _ALLOCATOR
                        LUT = (float *)Alloc(decoder->allocator, 4 * size * size * size * 3);
#else
                        LUT = (float *)MEMORY_ALLOC(4 * size * size * size * 3);
#endif
                        if (LUT)
                        {
                            fseek(fp, CFLKhdr.hdrsize, SEEK_SET);
                            len = (int)fread(LUT, 4, size * size * size * 3, fp);
                            if (len == size * size * size * 3)
                            {
                                useLUT = true;
                            }
                            else
                            {
                                // Fixed: free with the same allocator that
                                // allocated the buffer (was a bare
                                // MEMORY_FREE even under _ALLOCATOR).
#if _ALLOCATOR
                                Free(decoder->allocator, LUT);
#else
                                MEMORY_FREE(LUT);
#endif
                                LUT = NULL;
                            }
                        }
                    }
                }
            }
            fclose(fp);
        }
    }
    if (decoder)
    {
        // Publish (or clear) the cache entry for the next call.
        if (useLUT)
        {
            decoder->LUTcacheCRC = cfhddata->user_look_CRC;
            decoder->LUTcache = LUT;
            decoder->LUTcacheSize = *lutsize;
        }
        else
        {
            decoder->LUTcacheCRC = 0;
            decoder->LUTcache = NULL;
            decoder->LUTcacheSize = 0;
        }
    }
    return LUT;
}
// Allocate and fill an identity 3-D LUT with (1 << cube_base) samples per
// axis. The table holds edge^3 RGB triples of floats; each channel ramps
// linearly from 0.0 to 1.0 across its axis (red fastest, blue slowest).
// Returns the new table, or NULL if the allocation fails.
float *ResetCube64_3DLUT(DECODER *decoder, int cube_base)
{
    int edge = 1 << cube_base;
    float *cube = NULL;

#if _ALLOCATOR
    cube = (float *)Alloc(decoder->allocator, 4 * edge * edge * edge * 3);
#else
    cube = (float *)MEMORY_ALLOC(4 * edge * edge * edge * 3);
#endif
    if (cube == NULL)
        return NULL;

    {
        float *out = cube;
        int blue, green, red;
        for (blue = 0; blue < edge; blue++)
        {
            for (green = 0; green < edge; green++)
            {
                for (red = 0; red < edge; red++)
                {
                    // Identity mapping: output equals normalized input position.
                    *out++ = (float)red / (float)(edge - 1);
                    *out++ = (float)green / (float)(edge - 1);
                    *out++ = (float)blue / (float)(edge - 1);
                }
            }
        }
    }
    return cube;
}
//DAN20100927 -- return 0 if two tag groups have the same data types and sizes, even if their contents differ
int CompareTags(unsigned char *ptr1, unsigned char *ptr2, int len)
{
    // Walk two tag streams in parallel. Each record is a 32-bit FOURCC tag,
    // a 32-bit type/size word (type in the high byte, payload byte count in
    // the low 24 bits), then the payload. Return 0 when every tag and
    // type/size pair matches (payload bytes are intentionally ignored),
    // 1 on the first mismatch.
    uint32_t *a = (uint32_t *)ptr1;
    uint32_t *b = (uint32_t *)ptr2;
    int words = len >> 2;   // remaining length in 32-bit words
    int mismatch = 0;

    while (words >= 3)      // need at least tag + typesize + one payload word
    {
        if (a[0] != b[0] || a[1] != b[1])   // tag, then type/size
        {
            mismatch = 1;
            break;
        }

        // Advance past the payload (rounded up to whole words) plus the
        // two-word record header.
        int step = (int)(((a[1] & 0xffffff) + 3) >> 2) + 2;
        a += step;
        b += step;
        words -= step;
    }

    return mismatch;
}
void UpdateCFHDDATA(DECODER *decoder, unsigned char *ptr, int len, int delta, int priority)
{
int chn = 0;
CFHDDATA *cfhddata = NULL;
if (decoder)
cfhddata = &decoder->cfhddata;
if (delta)
chn = delta;
if (cfhddata && ptr && len) // overrides form database or external control
{
//unsigned char *base = ptr;
void *data;
unsigned char type;
int pos = 0;
size_t size, copysize;
uint32_t tag;
//void *metadatastart = data;
float tmp;
bool terminate = false;
int localpri = priority;
if (decoder->metadatachunks < METADATA_CHUNK_MAX)
{
int i;
bool found = false;
for (i = 0; i < decoder->metadatachunks; i++)
{
if (decoder->mdc_size[i] == len)
{
if (0 == CompareTags(decoder->mdc[i], ptr, len))
{
memcpy(decoder->mdc[i], ptr, len); // If same info type is present, use the later info (e.g. latest relevant keyframe.)
found = true;
break;
}
}
}
if (!found)
{
#if _ALLOCATOR
if (decoder->mdc[decoder->metadatachunks])
Free(decoder->allocator, decoder->mdc[decoder->metadatachunks]);
decoder->mdc[decoder->metadatachunks] = (unsigned char *)Alloc(decoder->allocator, len);
#else
if (decoder->mdc[decoder->metadatachunks])
MEMORY_FREE(decoder->mdc[decoder->metadatachunks]);
decoder->mdc[decoder->metadatachunks] = (unsigned char *)MEMORY_ALLOC(len);
#endif
if (decoder->mdc[decoder->metadatachunks])
memcpy(decoder->mdc[decoder->metadatachunks], ptr, len);
decoder->mdc_size[decoder->metadatachunks] = len;
decoder->metadatachunks++;
}
}
while (pos + 12 <= len && !terminate)
{
data = (void *)&ptr[8];
type = ptr[7];
size = ptr[4] + (ptr[5] << 8) + (ptr[6] << 16);
tag = MAKETAG(ptr[0], ptr[1], ptr[2], ptr[3]);
#if _WIN32 && _DEBUG && 0
if (type == 'f')
{
char t[1000], tt[100];
int cc = 16;
int lsize = (int)size;
float *fdata = (float *)data;
sprintf(t, "%c%c%c%c %1.8f ", ptr[0], ptr[1], ptr[2], ptr[3], *fdata++);
while (lsize > 4 && cc > 0)
{
sprintf(tt, "%1.8f ", *fdata++), lsize -= 4, cc--;
strcat(t, tt);
}
OutputDebugString(t);
}
if (type == 'L')
{
char t[1000], tt[100];
int cc = 16;
int lsize = (int)size;
int *ddata = (int *)data;
sprintf(t, "%c%c%c%c %d ", ptr[0], ptr[1], ptr[2], ptr[3], *ddata++);
while (lsize > 4 && cc > 0)
{
sprintf(tt, "%d ", *ddata++), lsize -= 4, cc--;
strcat(t, tt);
}
OutputDebugString(t);
}
if (type == 'H')
{
char t[1000], tt[100];
int cc = 16;
int lsize = (int)size;
int *ddata = (int *)data;
sprintf(t, "%c%c%c%c %08X ", ptr[0], ptr[1], ptr[2], ptr[3], *ddata++);
while (lsize > 4 && cc > 0)
{
sprintf(tt, "%08X ", *ddata++), lsize -= 4, cc--;
strcat(t, tt);
}
OutputDebugString(t);
}
#endif
switch (tag)
{
case 0:
terminate = true;
break;
case TAG_CLIP_GUID:
if (size == sizeof(cfhddata->clip_guid))
memcpy(&cfhddata->clip_guid, data, size);
break;
case TAG_PROCESS_PATH:
if (!delta)
{
unsigned int val = *((unsigned int *)data);
if (val & PROCESSING_ACTIVE2)
cfhddata->process_path_flags = val;
else
{
cfhddata->process_path_flags &= 0xffffff00;
cfhddata->process_path_flags |= (val & 0xff);
}
}
break;
case TAG_COLORSPACE_YUV: // 601/709
if (*((uint32_t *)data) & 1)
{
cfhddata->colorspace &= ~COLOR_SPACE_BT_709;
cfhddata->colorspace |= COLOR_SPACE_BT_601;
}
if (*((uint32_t *)data) & 2)
{
cfhddata->colorspace &= ~COLOR_SPACE_BT_601;
cfhddata->colorspace |= COLOR_SPACE_BT_709;
}
decoder->frame.colorspace_override = cfhddata->colorspace;
break;
case TAG_COLORSPACE_RGB: // cgRGB/vsRGB
if (*((uint32_t *)data) & 1)
{
cfhddata->colorspace &= ~COLOR_SPACE_VS_RGB;
}
if (*((uint32_t *)data) & 2)
{
cfhddata->colorspace |= COLOR_SPACE_VS_RGB;
}
if ((cfhddata->colorspace & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709)) == 0) // YUV mode not set
cfhddata->colorspace |= COLOR_SPACE_BT_709;
decoder->frame.colorspace_override = cfhddata->colorspace;
break;
case TAG_COLORSPACE_LIMIT:
if (*((uint32_t *)data) == 1)
decoder->broadcastLimit = 1;
else
decoder->broadcastLimit = 0;
break;
case TAG_COLORSPACE_FTR: // 422 dup'd/422to444 filtered
if (*((uint32_t *)data) & 1)
{
cfhddata->colorspace |= COLOR_SPACE_422_TO_444;
}
else
{
cfhddata->colorspace &= ~COLOR_SPACE_422_TO_444;
}
break;
case TAG_PIXEL_RATIO:
if (type == 'R' || type == 'H') // some older bitstreams under 'H' instead of 'R'
{
uint32_t val = *((uint32_t *)data);
decoder->pixel_aspect_x = (val >> 16) & 0xffff;
decoder->pixel_aspect_y = val & 0xffff;
}
break;
case TAG_MIX_DOWN_ALPHA:
decoder->useAlphaMixDown[0] = *((uint32_t *)data);
if (size >= 8)
decoder->useAlphaMixDown[1] = *(((uint32_t *)data) + 1);
break;
case TAG_CALIBRATE:
cfhddata->calibration = *((uint32_t *)data);
break;
case TAG_BAYER_FORMAT:
cfhddata->bayer_format = *((unsigned int *)data);
break;
case TAG_CHANNELS_ACTIVE:
if (!delta)
{
cfhddata->MSChannel_type_value &= 0xffffff00;
cfhddata->MSChannel_type_value |= *((unsigned int *)data);
}
break;
case TAG_CHANNELS_MIX:
if (!delta)
{
cfhddata->MSChannel_type_value &= 0xffff00ff;
cfhddata->MSChannel_type_value |= *((unsigned int *)data) << 8;
}
break;
case TAG_CHANNELS_MIX_VAL:
if (!delta)
{
cfhddata->MSChannel_type_value &= 0x0000ffff;
cfhddata->MSChannel_type_value |= (*((unsigned int *)data)) << 16;
cfhddata->split_pos_xy = ((*((unsigned int *)data)) >> 16) & 0xffff;
}
break;
case TAG_DEMOSAIC_TYPE:
if (!delta)
cfhddata->demosaic_type = *((unsigned int *)data);
break;
case TAG_CHANNEL_SWAP:
{
size_t lsize = size;
if (lsize > sizeof(unsigned long))
lsize = sizeof(unsigned long);
if (*((uint32_t *)data) == 0)
cfhddata->FramingFlags &= ~2;
else
cfhddata->FramingFlags |= 2;
}
break;
case TAG_LENS_GOPRO:
cfhddata->lensGoPro = *((uint32_t *)data);
break;
case TAG_LENS_SPHERE:
cfhddata->lensSphere = *((uint32_t *)data);
break;
case TAG_LENS_FILL:
cfhddata->lensFill = *((uint32_t *)data);
break;
case TAG_LENS_STYLE:
cfhddata->lensStyleSel = *((uint32_t *)data);
switch (cfhddata->lensStyleSel)
{
case 0:
cfhddata->lensGoPro = -1;
cfhddata->lensSphere = 0;
cfhddata->lensFill = 0;
break;
case 1: //GoPro
cfhddata->lensGoPro = 1;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 0;
break;
case 2: //GoPro + fill
cfhddata->lensGoPro = 1;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 1;
break;
case 3: //equi-rect
cfhddata->lensGoPro = 2;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 0;
break;
case 4: //custom lens
cfhddata->lensGoPro = 4;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 0;
break;
/* case 3: //Rectinlinear
cfhddata->lensGoPro = 0;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 0;
break;
case 4: //Rectinlinear + fill
cfhddata->lensGoPro = 0;
cfhddata->lensSphere = 1;
cfhddata->lensFill = 1;
break; */
// case 6: //DE-fish
// cfhddata->lensGoPro = 3;
// cfhddata->lensSphere = 0;
// cfhddata->lensFill = 0;
//break;
}
break;
case TAG_LENS_SRC_PARAMS:
memcpy(&cfhddata->lensCustomSRC, data, size <= sizeof(cfhddata->lensCustomSRC) ? size : sizeof(cfhddata->lensCustomSRC));
break;
case TAG_LENS_DST_PARAMS:
memcpy(&cfhddata->lensCustomDST, data, size <= sizeof(cfhddata->lensCustomDST) ? size : sizeof(cfhddata->lensCustomDST));
break;
case TAG_CHANNEL_FLIP:
//(1(Horiz)|2(Vert))<<channel num. 0 = no flip, 1 = h flip chn.1, 4 h flip chn.2, 0xf v/h flip chns.1&2, etc
if (!delta)
{
cfhddata->channel_flip = *((uint32_t *)data);
}
break;
case TAG_ENCODE_PRESET: // used by BYR4 inputs to indicate the source data is not linear.
if (!delta)
cfhddata->encode_curve_preset = *((unsigned int *)data);
break;
case TAG_ENCODE_CURVE:
if (!delta)
cfhddata->encode_curve = *((unsigned int *)data);
break;
case TAG_DECODE_CURVE:
if (!delta)
cfhddata->decode_curve = *((unsigned int *)data);
break;
case TAG_PRIMARIES_CURVE:
if (!delta)
{
cfhddata->PrimariesUseDecodeCurve = (*((unsigned int *)data) == CURVE_LINEAR ? 0 : 1);
}
break;
case TAG_CPU_MAX:
cfhddata->cpu_limit = *((uint32_t *)data);
if (decoder->thread_cntrl.capabilities && cfhddata->cpu_limit > 0)
{
int cpus = decoder->thread_cntrl.capabilities >> 16;
if (cpus > (int)cfhddata->cpu_limit)
{
cpus = cfhddata->cpu_limit;
decoder->thread_cntrl.capabilities &= 0xffff;
decoder->thread_cntrl.capabilities |= cpus << 16;
}
}
break;
case TAG_AFFINITY_MASK:
cfhddata->cpu_affinity = *((uint32_t *)data);
break;
case TAG_IGNORE_DATABASE:
cfhddata->ignore_disk_database = *((uint32_t *)data);
break;
case TAG_FORCE_DATABASE:
cfhddata->force_disk_database = *((uint32_t *)data);
break;
case TAG_UPDATE_LAST_USED:
cfhddata->update_last_used = *((uint32_t *)data);
break;
case TAG_UNIQUE_FRAMENUM:
decoder->codec.unique_framenumber = *((uint32_t *)data);
break;
case TAG_TIMECODE:
#ifdef _WIN32
strncpy_s(cfhddata->FileTimecodeData.orgtime, sizeof(cfhddata->FileTimecodeData.orgtime), (char *)data, 15);
#else
strncpy(cfhddata->FileTimecodeData.orgtime, (char *)data, 15);
#endif
break;
case TAG_TIMECODE_BASE:
cfhddata->timecode_base = *((uint32_t *)data);
break;
case TAG_PREFORMATTED_3D:
decoder->preformatted_3D_type = *((unsigned int *)data);
break;
// Moved out for now
case TAG_OVERLAYS:
{
size_t lsize = size;
if (lsize > sizeof(uint32_t))
lsize = sizeof(uint32_t);
if (*((uint32_t *)data) == 0)
cfhddata->BurninFlags &= ~1;
else
cfhddata->BurninFlags |= 1;
}
break;
case TAG_TOOLS:
{
size_t lsize = size;
if (lsize > sizeof(uint32_t))
lsize = sizeof(uint32_t);
if (*((uint32_t *)data) == 0)
cfhddata->BurninFlags &= ~2;
else
cfhddata->BurninFlags |= 2;
}
break;
}
{
switch (tag)
{
case TAG_LOOK_CRC:
if (!delta)
{
cfhddata->user_look_CRC = *((unsigned int *)data);
if (cfhddata->user_look_CRC == 0)
cfhddata->process_path_flags &= ~PROCESSING_LOOK_FILE;
}
break;
case TAG_LOOK_FILE:
if (!delta)
{
int copysize = (int)size;
if (copysize > 39) copysize = 39;
#ifdef _WIN32
strncpy_s(cfhddata->look_filename, sizeof(cfhddata->look_filename), (char *)data, copysize);
#else
strncpy(cfhddata->look_filename, (char *)data, copysize);
#endif
cfhddata->look_filename[copysize] = '\0';
}
break;
case TAG_LOOK_EXPORT:
if (!delta)
{
if (0 != strncmp(cfhddata->look_export_path, (char *)data, size))
{
#ifdef _WIN32
strncpy_s(cfhddata->look_export_path, sizeof(cfhddata->look_export_path), (char *)data, size);
#else
strncpy(cfhddata->look_export_path, (char *)data, size);
#endif
cfhddata->look_export_path[size] = '\0';
cfhddata->export_look = 1;
}
}
break;
case TAG_WHITE_BALANCE:
if (delta)
{
size_t i;
int col = 0;
float *fptr = (float *)data;
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;// - 1.0; //DAN20100922 -- Fix error in who left/right White balance was calculated.
if (i != 2) //second green skip
{
cfhddata->channel[chn].white_balance[col] = cfhddata->channel[0].white_balance[col] * tmp;
if (cfhddata->channel[chn].white_balance[col] < 0.4f) cfhddata->channel[chn].white_balance[col] = 0.4f;
if (cfhddata->channel[chn].white_balance[col] > 10.0f) cfhddata->channel[chn].white_balance[col] = 10.0f;
col++;
}
}
}
else
{
size_t i;
int col = 0;
float *fptr = (float *)data;
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;
if (i != 2) //second green skip
{
cfhddata->channel[0].white_balance[col] = tmp;
if (cfhddata->channel[0].white_balance[col] < 0.4f) cfhddata->channel[0].white_balance[col] = 0.4f;
if (cfhddata->channel[0].white_balance[col] > 10.0f) cfhddata->channel[0].white_balance[col] = 10.0f;
cfhddata->channel[1].white_balance[col] = cfhddata->channel[0].white_balance[col];
cfhddata->channel[2].white_balance[col] = cfhddata->channel[0].white_balance[col];
col++;
}
}
}
break;
case TAG_COLOR_MATRIX:
if (delta)
{
size_t i;
float *fptr = (float *)data;
float *fcolm = &cfhddata->orig_colormatrix[0][0];
float *fcolm2 = &cfhddata->custom_colormatrix[0][0];
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;
if (priority >= METADATA_PRIORITY_OVERRIDE)
*fcolm2++ += tmp;
else
{
*fcolm++ += tmp;
*fcolm2++ += tmp;
}
}
}
else
{
if (priority >= METADATA_PRIORITY_OVERRIDE)
{
memcpy(cfhddata->custom_colormatrix, data, size);
}
else
{
memcpy(cfhddata->orig_colormatrix, data, size);
memcpy(cfhddata->custom_colormatrix, data, size);
}
}
break;
case TAG_GAMMA_TWEAKS:
{
size_t i;
float *fptr = (float *)data;
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;
if (delta)
{
cfhddata->channel[chn].user_rgb_gamma[i] = cfhddata->channel[0].user_rgb_gamma[i] + tmp;
if (cfhddata->channel[chn].user_rgb_gamma[i] < 0.01f) cfhddata->channel[chn].user_rgb_gamma[i] = 0.01f;
if (cfhddata->channel[chn].user_rgb_gamma[i] > 10.0f) cfhddata->channel[chn].user_rgb_gamma[i] = 10.0f;
}
else
{
cfhddata->channel[0].user_rgb_gamma[i] = tmp;
if (cfhddata->channel[0].user_rgb_gamma[i] < 0.01f) cfhddata->channel[0].user_rgb_gamma[i] = 0.01f;
if (cfhddata->channel[0].user_rgb_gamma[i] > 10.0f) cfhddata->channel[0].user_rgb_gamma[i] = 10.0f;
cfhddata->channel[1].user_rgb_gamma[i] = cfhddata->channel[0].user_rgb_gamma[i];
cfhddata->channel[2].user_rgb_gamma[i] = cfhddata->channel[0].user_rgb_gamma[i];
}
}
}
break;
case TAG_RGB_GAIN:
{
size_t i;
float *fptr = (float *)data;
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;
if (delta)
{
cfhddata->channel[chn].user_rgb_gain[i] = cfhddata->channel[0].user_rgb_gain[i] * tmp;
if (cfhddata->channel[chn].user_rgb_gain[i] < 0.0) cfhddata->channel[chn].user_rgb_gain[i] = 0.0;
if (cfhddata->channel[chn].user_rgb_gain[i] > 10.0) cfhddata->channel[chn].user_rgb_gain[i] = 10.0;
}
else
{
cfhddata->channel[0].user_rgb_gain[i] = tmp; // unity at 1.0
if (cfhddata->channel[0].user_rgb_gain[i] < 0.0) cfhddata->channel[0].user_rgb_gain[i] = 0.0;
if (cfhddata->channel[0].user_rgb_gain[i] > 10.0) cfhddata->channel[0].user_rgb_gain[i] = 10.0;
cfhddata->channel[1].user_rgb_gain[i] = cfhddata->channel[0].user_rgb_gain[i];
cfhddata->channel[2].user_rgb_gain[i] = cfhddata->channel[0].user_rgb_gain[i];
}
}
}
break;
case TAG_RGB_OFFSET:
{
size_t i;
float *fptr = (float *)data;
for (i = 0; i < size / sizeof(float); i++)
{
tmp = *fptr++;
if (delta)
{
cfhddata->channel[chn].user_rgb_lift[i] = cfhddata->channel[0].user_rgb_lift[i] + tmp;
if (cfhddata->channel[chn].user_rgb_lift[i] < -1.0) cfhddata->channel[chn].user_rgb_lift[i] = -1.0;
if (cfhddata->channel[chn].user_rgb_lift[i] > 1.0) cfhddata->channel[chn].user_rgb_lift[i] = 1.0;
}
else
{
cfhddata->channel[0].user_rgb_lift[i] = tmp;
if (cfhddata->channel[0].user_rgb_lift[i] < -1.0) cfhddata->channel[0].user_rgb_lift[i] = -1.0;
if (cfhddata->channel[0].user_rgb_lift[i] > 1.0) cfhddata->channel[0].user_rgb_lift[i] = 1.0;
cfhddata->channel[1].user_rgb_lift[i] = cfhddata->channel[0].user_rgb_lift[i];
cfhddata->channel[2].user_rgb_lift[i] = cfhddata->channel[0].user_rgb_lift[i];
}
}
}
break;
case TAG_SATURATION:
{
if (delta)
{
cfhddata->channel[chn].user_saturation = cfhddata->channel[0].user_saturation + (*((float *)data)); // unity at 0.0
if (cfhddata->channel[chn].user_saturation < -1.0) cfhddata->channel[chn].user_saturation = -1.0;
if (cfhddata->channel[chn].user_saturation > 10.0) cfhddata->channel[chn].user_saturation = 10.0;
}
else
{
cfhddata->channel[0].user_saturation = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_saturation < -1.0f) cfhddata->channel[0].user_saturation = -1.0f;
if (cfhddata->channel[0].user_saturation > 10.0f) cfhddata->channel[0].user_saturation = 10.0f;
cfhddata->channel[1].user_saturation = cfhddata->channel[0].user_saturation;
cfhddata->channel[2].user_saturation = cfhddata->channel[0].user_saturation;
}
}
break;
case TAG_BLUR_SHARPEN:
tmp = *((float *)data);
if (tmp < -1.0) tmp = -1.0;
if (tmp > 1.0) tmp = 1.0;
//cfhddata->blur_sharpen = tmp;
{
if (delta)
{
cfhddata->channel[chn].user_blur_sharpen = cfhddata->channel[0].user_blur_sharpen + tmp; // unity at 0.0
if (cfhddata->channel[chn].user_blur_sharpen < -1.0) cfhddata->channel[chn].user_blur_sharpen = -1.0;
if (cfhddata->channel[chn].user_blur_sharpen > 1.0) cfhddata->channel[chn].user_blur_sharpen = 1.0;
}
else
{
cfhddata->channel[0].user_blur_sharpen = tmp; // unity at 0.0
cfhddata->channel[1].user_blur_sharpen = cfhddata->channel[0].user_blur_sharpen;
cfhddata->channel[2].user_blur_sharpen = cfhddata->channel[0].user_blur_sharpen;
}
}
break;
case TAG_ASC_SATURATION:
{
if (delta)
{
cfhddata->channel[chn].user_cdl_sat = cfhddata->channel[0].user_cdl_sat + (*((float *)data)); // unity at 0.0
if (cfhddata->channel[chn].user_cdl_sat < -1.0f) cfhddata->channel[chn].user_cdl_sat = -1.0f;
if (cfhddata->channel[chn].user_cdl_sat > 10.0f) cfhddata->channel[chn].user_cdl_sat = 10.0f;
}
else
{
cfhddata->channel[0].user_cdl_sat = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_cdl_sat < -1.0f) cfhddata->channel[0].user_cdl_sat = -1.0f;
if (cfhddata->channel[0].user_cdl_sat > 10.0f) cfhddata->channel[0].user_cdl_sat = 10.0f;
cfhddata->channel[1].user_cdl_sat = cfhddata->channel[0].user_cdl_sat;
cfhddata->channel[2].user_cdl_sat = cfhddata->channel[0]. user_cdl_sat;
}
}
break;
case TAG_HIGHLIGHT_DESAT:
{
if (!delta)
{
cfhddata->channel[0].user_highlight_sat = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_highlight_sat < -1.0f) cfhddata->channel[0].user_highlight_sat = -1.0f;
if (cfhddata->channel[0].user_highlight_sat > 4.0f) cfhddata->channel[0].user_highlight_sat = 4.0f;
cfhddata->channel[1].user_highlight_sat = cfhddata->channel[0].user_highlight_sat;
cfhddata->channel[2].user_highlight_sat = cfhddata->channel[0].user_highlight_sat;
}
}
break;
case TAG_VIGNETTE_START:
{
if (!delta)
{
cfhddata->channel[0].user_vignette_start = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_vignette_start < -1.0f) cfhddata->channel[0].user_vignette_start = -1.0f;
if (cfhddata->channel[0].user_vignette_start > 0.0f) cfhddata->channel[0].user_vignette_start = 0.0f;
cfhddata->channel[1].user_vignette_start = cfhddata->channel[0].user_vignette_start;
cfhddata->channel[2].user_vignette_start = cfhddata->channel[0].user_vignette_start;
}
}
break;
case TAG_VIGNETTE_END:
{
if (!delta)
{
cfhddata->channel[0].user_vignette_end = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_vignette_end < -1.0f) cfhddata->channel[0].user_vignette_end = -1.0f;
if (cfhddata->channel[0].user_vignette_end > 1.0f) cfhddata->channel[0].user_vignette_end = 1.0f;
cfhddata->channel[1].user_vignette_end = cfhddata->channel[0].user_vignette_end;
cfhddata->channel[2].user_vignette_end = cfhddata->channel[0].user_vignette_end;
}
}
break;
case TAG_VIGNETTE_GAIN:
{
if (!delta)
{
cfhddata->channel[0].user_vignette_gain = (*((float *)data)); // unity at 0.0
if (cfhddata->channel[0].user_vignette_gain < 0.0f) cfhddata->channel[0].user_vignette_gain = 0.0f;
if (cfhddata->channel[0].user_vignette_gain > 4.0f) cfhddata->channel[0].user_vignette_gain = 4.0f;
cfhddata->channel[1].user_vignette_gain = cfhddata->channel[0].user_vignette_gain;
cfhddata->channel[2].user_vignette_gain = cfhddata->channel[0].user_vignette_gain;
}
}
break;
case TAG_HIGHLIGHT_POINT:
{
if (!delta)
{
// cfhddata->channel[0].user_vignette_start = *((float *)data)-1.0;
// cfhddata->channel[0].user_vignette_end = *((float *)data)-0.5;
// cfhddata->channel[0].user_vignette_gain = 0.0;
cfhddata->channel[0].user_highlight_point = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_highlight_point < -1.0f) cfhddata->channel[0].user_highlight_point = -1.0f;
if (cfhddata->channel[0].user_highlight_point > 0.0f) cfhddata->channel[0].user_highlight_point = 0.0f;
cfhddata->channel[1].user_highlight_point = cfhddata->channel[0].user_highlight_point;
cfhddata->channel[2].user_highlight_point = cfhddata->channel[0].user_highlight_point;
}
}
break;
case TAG_CONTRAST:
{
if (delta)
{
cfhddata->channel[chn].user_contrast = cfhddata->channel[0].user_contrast + (*((float *)data)); // unity at 0.0
if (cfhddata->channel[chn].user_contrast < -1.0f) cfhddata->channel[chn].user_contrast = -1.0f;
if (cfhddata->channel[chn].user_contrast > 10.0f) cfhddata->channel[chn].user_contrast = 10.0f;
}
else
{
cfhddata->channel[0].user_contrast = (*((float *)data) - 1.0f); // unity at 0.0
if (cfhddata->channel[0].user_contrast < -1.0f) cfhddata->channel[0].user_contrast = -1.0f;
if (cfhddata->channel[0].user_contrast > 10.0f) cfhddata->channel[0].user_contrast = 10.0f;
cfhddata->channel[1].user_contrast = cfhddata->channel[0].user_contrast;
cfhddata->channel[2].user_contrast = cfhddata->channel[0].user_contrast;
}
}
break;
case TAG_EXPOSURE:
{
//int i;
float *fptr = (float *)data;
tmp = *fptr++;
if (delta)
{
cfhddata->channel[chn].user_exposure = ((cfhddata->channel[0].user_exposure + 1.0f) * tmp) - 1.0f; // unity at 1.0
if (cfhddata->channel[chn].user_exposure < -1.0f) cfhddata->channel[chn].user_exposure = -1.0f;
if (cfhddata->channel[chn].user_exposure > 10.0f) cfhddata->channel[chn].user_exposure = 10.0f;
}
else
{
cfhddata->channel[0].user_exposure = tmp - 1.0f; // unity at 1.0
if (cfhddata->channel[0].user_exposure < -1.0f) cfhddata->channel[0].user_exposure = -1.0f;
if (cfhddata->channel[0].user_exposure > 10.0f) cfhddata->channel[0].user_exposure = 10.0f;
cfhddata->channel[1].user_exposure = cfhddata->channel[0].user_exposure;
cfhddata->channel[2].user_exposure = cfhddata->channel[0].user_exposure;
}
}
break;
case TAG_BASE_MATRIX:
if (!delta)
cfhddata->use_base_matrix = *((unsigned int *)data);
break;
case TAG_GHOST_BUST_LEFT:
if (!delta)
{
decoder->ghost_bust_left = *((unsigned int *)data);
if (decoder->sqrttable == NULL)
{
#if _ALLOCATOR
decoder->sqrttable = (unsigned short *)Alloc(decoder->allocator, sizeof(short) * 1024 * 1024 );
#else
decoder->sqrttable = (unsigned short *)MEMORY_ALLOC(sizeof(short) * 1024 * 1024 );
#endif
memset(decoder->sqrttable, -1, sizeof(short) * 1024 * 1024 );
}
}
break;
case TAG_GHOST_BUST_RIGHT:
if (!delta)
{
decoder->ghost_bust_right = *((unsigned int *)data);
if (decoder->sqrttable == NULL)
{
#if _ALLOCATOR
decoder->sqrttable = (unsigned short *)Alloc(decoder->allocator, sizeof(short) * 1024 * 1024 );
#else
decoder->sqrttable = (unsigned short *)MEMORY_ALLOC(sizeof(short) * 1024 * 1024 );
#endif
memset(decoder->sqrttable, -1, sizeof(short) * 1024 * 1024 );
}
}
break;
case TAG_MASK_LEFT:
if (delta)
{
}
else
{
tmp = *((float *)data);
if (tmp < -0.2f) tmp = -0.2f;
if (tmp > 0.2f) tmp = 0.2f;
cfhddata->channel[0].FloatingWindowMaskL = tmp;
}
break;
case TAG_MASK_RIGHT:
if (delta)
{
}
else
{
tmp = *((float *)data);
if (tmp < -0.2f) tmp = -0.2f;
if (tmp > 0.2f) tmp = 0.2f;
if (tmp > -0.0001 && tmp < 0.0001f)
tmp = 0.0;
cfhddata->channel[0].FloatingWindowMaskR = tmp;
}
break;
case TAG_FRAME_TILT:
if (delta)
{
}
else
{
tmp = *((float *)data);
if (tmp < -0.1f) tmp = -0.1f;
if (tmp > 0.1f) tmp = 0.1f;
cfhddata->channel[0].FrameTilt = tmp;
cfhddata->channel[1].FrameTilt = cfhddata->channel[0].FrameTilt;
cfhddata->channel[2].FrameTilt = cfhddata->channel[0].FrameTilt;
}
break;
case TAG_HORIZONTAL_OFFSET:
if (delta)
{
tmp = *((float *)data);
cfhddata->channel[chn].HorizontalOffset = cfhddata->channel[0].HorizontalOffset + tmp;
if (cfhddata->channel[chn].HorizontalOffset < -1.0) cfhddata->channel[chn].HorizontalOffset = -1.0;
if (cfhddata->channel[chn].HorizontalOffset > 1.0) cfhddata->channel[chn].HorizontalOffset = 1.0;
}
else
{
tmp = *((float *)data);
if (tmp < -1.0f) tmp = -1.0f;
if (tmp > 1.0f) tmp = 1.0f;
cfhddata->channel[0].HorizontalOffset = tmp;
cfhddata->channel[1].HorizontalOffset = cfhddata->channel[0].HorizontalOffset;
cfhddata->channel[2].HorizontalOffset = cfhddata->channel[0].HorizontalOffset;
}
break;
case TAG_VERTICAL_OFFSET:
if (delta)
{
tmp = *((float *)data);
cfhddata->channel[chn].VerticalOffset = cfhddata->channel[0].VerticalOffset + tmp;
if (cfhddata->channel[chn].VerticalOffset < -1.0) cfhddata->channel[chn].VerticalOffset = -1.0;
if (cfhddata->channel[chn].VerticalOffset > 1.0) cfhddata->channel[chn].VerticalOffset = 1.0;
}
else
{
tmp = *((float *)data);
if (tmp < -1.0f) tmp = -1.0f;
if (tmp > 1.0f) tmp = 1.0f;
cfhddata->channel[0].VerticalOffset = tmp;
cfhddata->channel[1].VerticalOffset = cfhddata->channel[0].VerticalOffset;
cfhddata->channel[2].VerticalOffset = cfhddata->channel[0].VerticalOffset;
}
break;
case TAG_ROTATION_OFFSET:
if (delta)
{
tmp = *((float *)data);
cfhddata->channel[chn].RotationOffset = cfhddata->channel[0].RotationOffset + tmp;
if (cfhddata->channel[chn].RotationOffset < -0.2f) cfhddata->channel[chn].RotationOffset = -0.2f;
if (cfhddata->channel[chn].RotationOffset > 0.2f) cfhddata->channel[chn].RotationOffset = 0.2f;
}
else
{
tmp = *((float *)data);
if (tmp < -0.2f) tmp = -0.2f;
if (tmp > 0.2f) tmp = 0.2f;
cfhddata->channel[0].RotationOffset = tmp;
cfhddata->channel[1].RotationOffset = cfhddata->channel[0].RotationOffset;
cfhddata->channel[2].RotationOffset = cfhddata->channel[0].RotationOffset;
}
break;
case TAG_FRAME_ZOOM:
{
tmp = *((float *)data);
if (delta)
{
cfhddata->channel[chn].FrameZoom = cfhddata->channel[0].FrameZoom * tmp;
}
else
{
tmp = *((float *)data);
if (tmp < 0.10f) tmp = 0.10f;
if (tmp > 4.0f) tmp = 4.0f;
cfhddata->channel[0].FrameZoom = tmp;
cfhddata->channel[1].FrameZoom = cfhddata->channel[0].FrameZoom;
cfhddata->channel[2].FrameZoom = cfhddata->channel[0].FrameZoom;
}
}
break;
case TAG_FRAME_DIFF_ZOOM:
{
tmp = *((float *)data);
if (delta)
{
cfhddata->channel[chn].FrameDiffZoom = cfhddata->channel[0].FrameDiffZoom * tmp;
}
else
{
tmp = *((float *)data);
if (tmp < 0.5f) tmp = 0.5f;
if (tmp > 2.0f) tmp = 2.0f;
cfhddata->channel[0].FrameDiffZoom = tmp;
cfhddata->channel[1].FrameDiffZoom = cfhddata->channel[0].FrameDiffZoom;
cfhddata->channel[2].FrameDiffZoom = cfhddata->channel[0].FrameDiffZoom;
}
}
break;
case TAG_FRAME_KEYSTONE:
{
tmp = *((float *)data);
if (delta)
{
// cfhddata->channel[chn].FrameKeyStone = cfhddata->channel[0].FrameKeyStone + tmp;
}
else
{
tmp = *((float *)data);
if (tmp < -0.2f) tmp = -0.2f;
if (tmp > 0.2f) tmp = 0.2f;
cfhddata->channel[0].FrameKeyStone = tmp;
cfhddata->channel[1].FrameKeyStone = cfhddata->channel[0].FrameKeyStone;
cfhddata->channel[2].FrameKeyStone = cfhddata->channel[0].FrameKeyStone;
}
}
break;
case TAG_AUTO_ZOOM:
{
size_t lsize = size;
if (lsize > sizeof(unsigned long))
lsize = sizeof(unsigned long);
if (*((uint32_t *)data) == 0)
cfhddata->FramingFlags &= ~1;
else
cfhddata->FramingFlags |= 1;
}
break;
case TAG_FRAME_MASK:
{
size_t lsize = size;
if (lsize > sizeof(Frame_Region) * 2)
lsize = sizeof(Frame_Region) * 2;
memcpy(&cfhddata->channel[0].FrameMask, data, lsize);
}
break;
case TAG_FRAME_OFFSET_X:
tmp = *((float *)data);
if (tmp < -0.5f) tmp = -0.5f;
if (tmp > 0.5f) tmp = 0.5f;
cfhddata->FrameOffsetX = -tmp;
break;
case TAG_FRAME_OFFSET_Y:
tmp = *((float *)data);
if (tmp < -0.5f) tmp = -0.5f;
if (tmp > 0.5f) tmp = 0.5f;
cfhddata->FrameOffsetY = tmp;
break;
case TAG_FRAME_OFFSET_R:
tmp = *((float *)data);
if (tmp < -0.5f) tmp = -0.5f;
if (tmp > 0.5f) tmp = 0.5f;
cfhddata->FrameOffsetR = tmp;
break;
case TAG_FRAME_OFFSET_F:
tmp = *((float *)data);
if (tmp < -90.0f) tmp = -90.0f;
if (tmp > 90.0f) tmp = 90.0f;
cfhddata->FrameOffsetF = tmp;
break;
case TAG_FRAME_HSCALE:
tmp = *((float *)data);
//if(tmp < 0.75) tmp = 0.75;
//if(tmp > 1.25) tmp = 1.25;
cfhddata->FrameHScale = tmp;
break;
case TAG_FRAME_HDYNAMIC:
tmp = *((float *)data);
if (tmp < 0.5f) tmp = 0.5f;
if (tmp > 1.5f) tmp = 1.5f;
cfhddata->FrameHDynamic = tmp;
break;
case TAG_FRAME_DYNCENTER:
tmp = *((float *)data);
if (tmp < 0.0f) tmp = 0.0f;
if (tmp > 1.0f) tmp = 1.0f;
cfhddata->FrameHDynCenter = tmp;
break;
case TAG_FRAME_DYNWIDTH:
tmp = *((float *)data);
if (tmp < 0.0f) tmp = 0.0f;
if (tmp > 1.0f) tmp = 1.0f;
cfhddata->FrameHDynWidth = tmp;
break;
case TAG_SPLIT_POS:
tmp = *((float *)data);
if (tmp < 0.0f) tmp = 0.0f;
if (tmp > 1.0f) tmp = 1.0f;
cfhddata->split_CC_position = tmp;
break;
/* case TAG_HISTOGRAM:
{
int lsize = size;
if(lsize > sizeof(uint32_t))
lsize = sizeof(uint32_t);
if(*((uint32_t *)data) == 0)
cfhddata->BurninFlags &= ~2;
else
cfhddata->BurninFlags |= 2;
}
break;
case TAG_WAVEFORM:
{
int lsize = size;
if(lsize > sizeof(uint32_t))
lsize = sizeof(uint32_t);
if(*((uint32_t *)data) == 0)
cfhddata->BurninFlags &= ~4;
else
cfhddata->BurninFlags |= 4;
}
break;
case TAG_VECTORSCOPE:
{
int lsize = size;
if(lsize > sizeof(uint32_t))
lsize = sizeof(uint32_t);
if(*((uint32_t *)data) == 0)
cfhddata->BurninFlags &= ~8;
else
cfhddata->BurninFlags |= 8;
}
break;
*/
case TAG_DISPLAY_METADATA:
{
int i, foundID = 0;
char CurrentID[64];
char LoadedID[64];
GetCurrentID(decoder, &ptr[8], (unsigned int)size, CurrentID, sizeof(CurrentID));
if (0 == strncmp(CurrentID, "Tool:", 5))
{
if (0 == strcmp(CurrentID, "Tool:Histogram"))
cfhddata->ComputeFlags |= 2;
if (0 == strcmp(CurrentID, "Tool:Waveform"))
cfhddata->ComputeFlags |= 4;
if (0 == strcmp(CurrentID, "Tool:Vectorscope") || 0 == strcmp(CurrentID, "Tool:Vectorscope2"))
cfhddata->ComputeFlags |= 8;
if (0 == strncmp(CurrentID, "Tool:Grid", 9))
cfhddata->ComputeFlags |= 16;
}
for (i = 0; i < decoder->drawmetadataobjects; i++)
{
GetCurrentID(decoder, decoder->dmo[i], decoder->dmo_size[i], LoadedID, sizeof(LoadedID));
if (0 == strcmp(LoadedID, CurrentID))
{
foundID = 1;
break;
}
}
if (!foundID)
{
decoder->dmo[decoder->drawmetadataobjects] = &ptr[8];
decoder->dmo_size[decoder->drawmetadataobjects] = (unsigned int)size;
decoder->drawmetadataobjects++;
//skip the data within (process later)
memcpy(&decoder->MDPcurrent, &decoder->MDPdefault, sizeof(MDParams));
}
}
break;
case TAG_DISPLAY_ACTION_SAFE:
memcpy(&decoder->ActiveSafe[0], data, size);
break;
case TAG_DISPLAY_TITLE_SAFE:
memcpy(&decoder->TitleSafe[0], data, size);
break;
case TAG_DISPLAY_OVERLAY_SAFE:
memcpy(&decoder->OverlaySafe[0], data, size);
break;
case TAG_DISPLAY_SCRIPT:
case TAG_DISPLAY_SCRIPT_FILE:
break;
case TAG_DISPLAY_TAG:
decoder->MDPdefault.tag = *((uint32_t *)data);
decoder->MDPdefault.freeform[0] = 0;
break;
case TAG_DISPLAY_FREEFORM:
copysize = size;
if (copysize >= FREEFORM_STR_MAXSIZE) copysize = FREEFORM_STR_MAXSIZE - 1;
#ifdef _WIN32
strncpy_s(decoder->MDPdefault.freeform, sizeof(decoder->MDPdefault.freeform), (char *)data, copysize);
#else
strncpy(decoder->MDPdefault.freeform, (char *)data, copysize);
#endif
decoder->MDPdefault.freeform[copysize] = '\0';
decoder->MDPdefault.tag = 0;
break;
case TAG_DISPLAY_FONT:
copysize = size;
if (copysize >= FONTNAME_STR_MAXSIZE) copysize = FONTNAME_STR_MAXSIZE - 1;
#ifdef _WIN32
strncpy_s(decoder->MDPdefault.font, sizeof(decoder->MDPdefault.font), (char *)data, copysize);
#else
strncpy(decoder->MDPdefault.font, (char *)data, copysize);
#endif
decoder->MDPdefault.font[copysize] = 0;
break;
case TAG_DISPLAY_FONTSIZE:
decoder->MDPdefault.fontsize = *((float *)data);
break;
case TAG_DISPLAY_JUSTIFY:
decoder->MDPdefault.justication = *((uint32_t *)data);
break;
case TAG_DISPLAY_FCOLOR:
memcpy(&decoder->MDPdefault.fcolor[0], data, sizeof(float) * 4);
break;
case TAG_DISPLAY_BCOLOR:
memcpy(&decoder->MDPdefault.bcolor[0], data, sizeof(float) * 4);
break;
case TAG_DISPLAY_SCOLOR:
memcpy(&decoder->MDPdefault.scolor[0], data, sizeof(float) * 4);
break;
case TAG_DISPLAY_STROKE_WIDTH:
decoder->MDPdefault.stroke_width = *((float *)data);
break;
case TAG_DISPLAY_XPOS:
decoder->MDPdefault.xypos[0][0] = *((float *)data);
break;
case TAG_DISPLAY_YPOS:
decoder->MDPdefault.xypos[0][1] = *((float *)data);
break;
case TAG_DISPLAY_XYPOS:
memcpy(&decoder->MDPdefault.xypos[0][0], data, sizeof(float) * 2);
break;
case TAG_DISPLAY_FORMAT:
copysize = size;
if (copysize >= FORMAT_STR_MAXSIZE) copysize = FORMAT_STR_MAXSIZE - 1;
#ifdef _WIN32
strncpy_s(decoder->MDPdefault.format_str, sizeof(decoder->MDPdefault.format_str), (char *)data, copysize);
#else
strncpy(decoder->MDPdefault.format_str, (char *)data, copysize);
#endif
decoder->MDPdefault.format_str[copysize] = '\0';
break;
case TAG_DISPLAY_PNG_PATH:
copysize = size;
if (copysize >= PNG_PATH_MAXSIZE) copysize = PNG_PATH_MAXSIZE - 1;
#ifdef _WIN32
strncpy_s(decoder->MDPdefault.png_path, sizeof(decoder->MDPdefault.png_path), (char *)data, copysize);
#else
strncpy(decoder->MDPdefault.png_path, (char *)data, copysize);
#endif
decoder->MDPdefault.png_path[copysize] = '\0';
break;
case TAG_DISPLAY_PNG_SIZE:
memcpy(&decoder->MDPdefault.object_scale[0], data, sizeof(float) * 2);
break;
case TAG_DISPLAY_PARALLAX:
decoder->MDPdefault.parallax = *((int32_t *)data);
break;
case TAG_EYE_DELTA_2:
localpri++;
case TAG_EYE_DELTA_1:
localpri++;
localpri = priority;
break;
}
}
if (!terminate)
{
ptr += (8 + size + 3) & 0xfffffc;
pos += (8 + size + 3) & 0xfffffc;
}
}
if (cfhddata->FramingFlags & 1)
{
int i;//both,left,rght
int w = 16, h = 9;
float denom, autozoom, horizZoom1, horizZoom2, verticalZoom;
GetDisplayAspectRatio(decoder, &w, &h);
for (i = 0; i < 3; i++)
{
horizZoom1 = horizZoom2 = fabsf(cfhddata->channel[i].HorizontalOffset) + fabsf(cfhddata->channel[i].RotationOffset * 0.5f);
verticalZoom = fabsf(cfhddata->channel[i].VerticalOffset) + fabsf(cfhddata->channel[i].RotationOffset * (float)(w * w) / (float)(h * h) * 0.5f); // 16/9
verticalZoom += fabsf(cfhddata->channel[i].FrameKeyStone / 4.0f);
horizZoom1 += cfhddata->channel[0].FrameTilt * 0.5f;
horizZoom2 -= cfhddata->channel[0].FrameTilt * 0.5f;
denom = (1.0f - verticalZoom * 2);
if (denom > (1.0f - horizZoom1 * 2))
denom = (1.0f - horizZoom1 * 2);
if (denom > (1.0f - horizZoom2 * 2))
denom = (1.0f - horizZoom2 * 2);
if (denom < 0.25f) denom = 0.25f;
autozoom = 1.0f / denom;
if (autozoom > 4.0f)
autozoom = 4.0f;
if (i < 2)
cfhddata->channel[i].FrameAutoZoom = autozoom / cfhddata->channel[1].FrameDiffZoom;
else
cfhddata->channel[i].FrameAutoZoom = autozoom * cfhddata->channel[2].FrameDiffZoom;
}
if (cfhddata->channel[0].FrameAutoZoom < cfhddata->channel[1].FrameAutoZoom)
cfhddata->channel[0].FrameAutoZoom = cfhddata->channel[1].FrameAutoZoom;
if (cfhddata->channel[0].FrameAutoZoom < cfhddata->channel[2].FrameAutoZoom)
cfhddata->channel[0].FrameAutoZoom = cfhddata->channel[2].FrameAutoZoom;
}
else
{
cfhddata->channel[0].FrameAutoZoom = 1.0f;
cfhddata->channel[1].FrameAutoZoom = 1.0f / cfhddata->channel[1].FrameDiffZoom;
cfhddata->channel[2].FrameAutoZoom = 1.0f * cfhddata->channel[2].FrameDiffZoom;
}
}
}
/*
   Scan a metadata tuple stream and extract a display identifier into 'id'.

   The stream at 'ptr' (length 'len' bytes) is a sequence of tuples:
   4-byte tag, 24-bit little-endian payload size (ptr[4..6]), a type byte
   (ptr[7], unused here), then the payload, with each tuple padded to a
   4-byte boundary.  TAG_DISPLAY_TAG yields "TAG:xxxx"; TAG_DISPLAY_FREEFORM
   copies the free-form string.  Later tuples overwrite earlier ones.

   Fixes vs. the previous revision:
   - 'id_size' is validated (id_size == 0 previously underflowed in
     'id_size - 1' and allowed an unbounded strncpy into 'id').
   - TAG_DISPLAY_TAG no longer writes its fixed 9 bytes unless 'id' can
     hold them.
   - The FREEFORM copy is clamped via a local 'copysize' so that 'size'
     still drives the stream advance below (clamping 'size' itself
     desynchronized parsing of all subsequent tuples; the sibling
     metadata parser already uses this copysize pattern).
   - A truncated final tuple (payload extending past 'len') stops the scan.
*/
void GetCurrentID(DECODER *decoder, unsigned char *ptr, unsigned int len, char *id, unsigned int id_size)
{
	if (decoder && ptr && len && id && id_size) // overrides from database or external control
	{
		void *data;
		unsigned int pos = 0;
		unsigned int size;
		unsigned int copysize;
		unsigned int tag;
		while (pos + 12 <= len)
		{
			data = (void *)&ptr[8];
			size = ptr[4] + (ptr[5] << 8) + (ptr[6] << 16); // 24-bit little-endian payload size
			tag = MAKETAG(ptr[0], ptr[1], ptr[2], ptr[3]);
			if (size > len - (pos + 8)) break; // truncated tuple: payload runs past 'len'
			switch (tag)
			{
			default:
				break;
			case TAG_DISPLAY_TAG:
				if (id_size >= 9) // "TAG:" + 4 tag bytes + terminator
				{
					tag = *((uint32_t *)data);
					id[0] = 'T';
					id[1] = 'A';
					id[2] = 'G';
					id[3] = ':';
					id[4] = tag & 0xff;
					id[5] = (tag >> 8) & 0xff;
					id[6] = (tag >> 16) & 0xff;
					id[7] = (tag >> 24) & 0xff;
					id[8] = 0;
				}
				break;
			case TAG_DISPLAY_FREEFORM:
				// Clamp the copy, not 'size': 'size' determines the advance below.
				copysize = size;
				if (copysize > id_size - 1) copysize = id_size - 1; // id_size > 0 checked above
#ifdef _WIN32
				strncpy_s(id, id_size, (char *)data, copysize);
#else
				strncpy(id, (char *)data, copysize);
#endif
				id[copysize] = 0;
				break;
			}
			// Advance past header + payload, rounded up to a 4-byte boundary.
			// NOTE(review): mask is 0xfffffc, not ~3 (0xfffffffc); harmless for
			// 24-bit sizes well below the mask width, and kept for consistency
			// with the companion tuple walker — confirm before widening.
			ptr += (8 + size + 3) & 0xfffffc;
			pos += (8 + size + 3) & 0xfffffc;
		}
	}
}
|
sync.c | // Load the OpenMP functions library
// Load the OpenMP functions library
#include <omp.h>
#include <stdio.h>  /* printf — was missing; printf was implicitly declared */
#include <unistd.h> /* sleep — was missing; sleep was implicitly declared */

/*
 * Demo of OpenMP synchronization constructs (critical, barrier, master).
 * All pragmas are left commented out so the program builds and runs
 * serially as-is; uncomment them to observe the parallel behaviour.
 */
int main()
{
	// Set and initialise variables
	int tnum = 0, incr = 0;
	// Start parallel block
	// #pragma omp parallel private(tnum)
	// {
	// Start a critical block that we want only one thread to access
	// at a time. Note that the 'incr' variable is NOT private!
	// #pragma omp critical
	// {
	incr = incr + 1;
	// }
	// Wait here with barrier
	// #pragma omp barrier
	// The master thread prints out the results of the calculation and
	// then does some other processing that the other threads have to
	// wait for.
	// #pragma omp master
	// {
	tnum = omp_get_thread_num();
	printf("Master thread is number %d\n", tnum);
	printf("Summation = %d\n", incr);
	sleep(10);
	// }
	// Ensure ALL threads have finished their processing before continuing.
	//#pragma omp barrier
	// {
	printf("finished!\n");
	// }
	// }
	return 0;
}
|
matrixMultiplication.c | #include <stdio.h>
#include <omp.h>
/*
 * Computes C = A * B for fixed 3x3 integer matrices with OpenMP and
 * prints A, B and C.  A is filled with 1..9 row-major, B with 9..1.
 */
int main(){
	int a[3][3], b[3][3], c[3][3];
	int i,j,k,l=1;
	omp_set_dynamic(0);           /* forbid the runtime from shrinking the team */
	int m = omp_get_num_procs();
	omp_set_num_threads(m);       /* one thread per processor */
	for (i = 0; i < 3; i++){
		for (j = 0; j < 3; j++){
			a[i][j] = l;
			b[i][j] = 10 - l;
			l++;
		}
	}
	printf("\nA: \n");
	for (i = 0; i < 3; i++){
		for (j = 0; j < 3; j++){
			printf("%d ", a[i][j]);
		}
		printf("\n");
	}
	printf("\nB: \n");
	for (i = 0; i < 3; i++){
		for (j = 0; j < 3; j++){
			printf("%d ", b[i][j]);
		}
		printf("\n");
	}
	/* Parallelize over rows only.  The previous version also placed a
	   "#pragma omp parallel for reduction(+:sum)" on the inner k-loop,
	   which nests a brand-new parallel region inside every element of the
	   already-parallel outer loop — pure thread oversubscription for a
	   3-element dot product.  The inner loop now runs serially. */
	#pragma omp parallel for shared(a, b, c) private(i, j, k)
	for (i = 0; i < 3; ++i) {
		for (j = 0; j < 3; ++j) {
			int sum = 0;
			for (k = 0; k < 3; ++k){
				sum += a[i][k] * b[k][j];
			}
			c[i][j] = sum;
		}
	}
	printf("\nC: \n");
	for (i= 0; i< 3; i++){
		for (j= 0; j< 3; j++){
			printf("%d\t",c[i][j]);
		}
		printf("\n");
	}
	return 0;
}
GB_AxB_saxpy_parallel.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy_parallel: C<M>=A*B, C=A*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Parallel matrix-matrix multiply, A*B with optional mask M, using the saxpy
// method. This method is used by GrB_mxm, GrB_vxm, and GrB_mxv. For both of
// the latter two methods, B on input will be an nrows-by-1 column vector.
// The strategy is to "slice" (or partition) B, as B = [B0 B1 ... B(t-1)] if
// there are t threads. Then each thread k computes C(k) = A*B(k), and then
// the result is concatenated, as C = [C0 C1 ... C(t-1)].
// Each thread k computes an independent output matrix C(k), doing both its
// analysis and numeric phases.
// This strategy works well for OpenMP, but it could also be written in a
// purely inspector+executor style, like the GB_AxB_dot* methods. Those
// methods do the analysis in parallel, and first determine the size of the
// output matrix C. Then a parallel cumulative sum is computed, and the entire
// output matrix is allocated.  Then each task of the numeric phase
// computes its part of the result C, without the need for any memory
// allocation by individual threads.
// This function, and the matrices C, M, A, and B are all CSR/CSC agnostic.
// For this discussion, suppose they are CSC, with vlen = # of rows, and vdim =
// # of columns.
// A*B is being computed, and the vector dimension of A must be identical to
// the vector length of B (as if both A and B are CSC matrices, and the number
// of columns of A is the same as the number of rows of B).
// The output matrix C = *Chandle has not been allocated, so C is NULL on
// input. The mask M is optional.
// The semiring defines C=A*B. flipxy modifies how the semiring multiply
// operator is applied. If false, then fmult(aik,bkj) is computed. If true,
// then the operands are swapped, and fmult(bkj,aij) is done instead.
// AxB_method selects the method to use:
// GxB_DEFAULT: the method is selected automatically
// GxB_AxB_GUSTAVSON: Gustavson's method for A*B
// GxB_AxB_HEAP: heap method for A*B
// GxB_AxB_HASH: hash method for A*B (FUTURE)
// The dot product method does not use this function.
// AxB_method_used reports the method actually chosen. This is for
// informational purposes only, so if a parallel C=A*B splits the work into
// multiple submatrix multiplications, and uses different methods on each
// submatrix, then AxB_method_used is the method chosen by thread zero.
// FUTURE:: hash-based method, and multi-phase Gustavson and Heap methods,
// which do not do any memory allocations in parallel, but instead use an
// inspector+executor style (like GB_AxB_dot*).  This should work better on the
// GPU.
#include "GB_mxm.h"
#include "GB_Sauna.h"
// Compute C<M>=A*B (or C=A*B) by slicing B into per-thread pieces, computing
// each Cslice = A*Bslice independently (analysis + numeric phases per
// thread), then concatenating — or, for a "fine slice", summing — the
// slices into C.  Falls back to a purely sequential kernel for one thread.
GrB_Info GB_AxB_saxpy_parallel      // parallel matrix-matrix multiply
(
    GrB_Matrix *Chandle,            // output matrix, NULL on input
    GrB_Matrix M,                   // optional mask matrix
    const bool Mask_comp,           // if true, use !M
    const GrB_Matrix A,             // input matrix A
    const GrB_Matrix B,             // input matrix B
    const GrB_Semiring semiring,    // semiring that defines C=A*B
    const bool flipxy,              // if true, do z=fmult(b,a) vs fmult(a,b)
    const GrB_Desc_Value AxB_method,// for auto vs user selection of methods
    GrB_Desc_Value *AxB_method_used,// method selected by thread zero
    bool *mask_applied,             // if true, mask was applied
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (Chandle != NULL) ;          // C = (*Chandle) is NULL
    ASSERT (*Chandle == NULL) ;
    ASSERT_OK_OR_NULL (GB_check (M, "M for parallel A*B", GB0)) ;
    ASSERT_OK (GB_check (A, "A for parallel A*B", GB0)) ;
    ASSERT_OK (GB_check (B, "B for parallel A*B", GB0)) ;
    // none of the inputs may have pending tuples or zombies
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT_OK (GB_check (semiring, "semiring for parallel A*B", GB0)) ;
    ASSERT (AxB_method_used != NULL) ;

    GrB_Info info ;

    //--------------------------------------------------------------------------
    // get A and B
    //--------------------------------------------------------------------------

    // ensure the cached non-empty-vector counts are valid (< 0 means unknown)
    if (B->nvec_nonempty < 0)
    {
        B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
    }

    if (A->nvec_nonempty < 0)
    {
        A->nvec_nonempty = GB_nvec_nonempty (A, NULL) ;
    }

    int64_t anz = GB_NNZ (A) ;
    int64_t bnvec = B->nvec ;
    int64_t bnz = GB_NNZ (B) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // nthreads may be reduced after the flopcount is computed.

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anz + bnz, chunk, nthreads_max) ;

    //==========================================================================
    // sequential C<M>=A*B
    //==========================================================================

    // nothing allocated yet, so cleanup-on-error is a no-op here
    #define GB_FREE_ALL ;

    if (nthreads == 1)
    {
        // select the method
        int64_t bjnz_max ;
        GB_AxB_select (A, B, semiring, AxB_method, AxB_method_used, &bjnz_max) ;

        // acquire a Sauna (Gustavson workspace) if Gustavson's method is used
        int Sauna_id = -2 ;
        if (*AxB_method_used == GxB_AxB_GUSTAVSON)
        {
            GB_OK (GB_Sauna_acquire (1, &Sauna_id, AxB_method_used, Context)) ;
        }

        // C<M>=A*B
        GrB_Info info1 = GB_AxB_saxpy_sequential (Chandle, M, Mask_comp, A, B,
            semiring, flipxy, *AxB_method_used, bjnz_max, true, mask_applied,
            Sauna_id) ;

        // release the Sauna for Gustavson's method
        if (*AxB_method_used == GxB_AxB_GUSTAVSON)
        {
            // info is reset, so info1 is used above
            GB_OK (GB_Sauna_release (1, &Sauna_id)) ;
        }

        return ((info1 == GrB_OUT_OF_MEMORY) ? GB_OUT_OF_MEMORY : info1) ;
    }

    //==========================================================================
    // parallel C<M>=A*B
    //==========================================================================

    // The # of threads may be reduced, if the problem is small, even to
    // nthreads=1.  But so far, for now, nthreads > 1.

    ASSERT (nthreads > 1) ;

    //--------------------------------------------------------------------------
    // count the flops and determine # of threads to use
    //--------------------------------------------------------------------------

    int64_t total_flops ;
    // fine slice: more threads than vectors in B, so columns must be split
    bool fine_slice = (nthreads > bnvec) ;
    int64_t *restrict Bflops = NULL ;
    int64_t *restrict Bflops_per_entry = NULL ;

    if (!fine_slice)
    {

        //----------------------------------------------------------------------
        // slice B by flops
        //----------------------------------------------------------------------

        // Slice B so that each slice has a balanced amount of flops, to
        // compute its slice of C.  Each thread gets enough columns of B so
        // that it has roughly total_flops / nthreads work to do.  Individual
        // columns are not sliced, so the final step to compute C is a
        // concatenation, not a summation.  This should give a very good load
        // balance where there are enough columns of B, but at the cost of a
        // more expensive symbolic analysis, taking O(bnz) time.  The analysis
        // is itself fully parallel, however.  This method cannot parallelize
        // A*B when B is a single column (GrB_mxv or GrB_vxm).

        // thread tid will do columns Slice [tid] to Slice [tid+1]-1

        // note that Bflops is initialized to zero
        GB_CALLOC_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
        if (Bflops == NULL)
        {
            // out of memory
            return (GB_OUT_OF_MEMORY) ;
        }

        // Bflops [k] = # of flops to compute A*B(:,j) where j is the kth
        // vector in B
        GB_AxB_flopcount (Bflops, NULL, (Mask_comp) ? NULL : M, A, B, 0,
            Context) ;

        // reduce # of threads, based on flop count and the chunk size
        total_flops = Bflops [bnvec] ;

    }
    else
    {

        //----------------------------------------------------------------------
        // fine slice of B by flops (split columns of B)
        //----------------------------------------------------------------------

        // Slice B so that each slice has nearly exactly balanced amount of
        // flops to compute its slice of C.  Each thread gets exactly the
        // number of entries so that it does total_flops/nthreads work (rounded
        // to the nearest number of entries in B).

        // note that Bflops_per_entry is initialized to zero
        GB_CALLOC_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
        if (Bflops_per_entry == NULL)
        {
            // out of memory
            return (GB_OUT_OF_MEMORY) ;
        }

        // Bflops_per_entry [p] = # of flops to compute A(:,k)*B(k,j)
        // where B(k,j) is in Bi [p] and Bx [p].
        GB_AxB_flopcount (NULL, Bflops_per_entry, (Mask_comp) ? NULL : M,
            A, B, 0, Context) ;

        // reduce # of threads, based on flop count and the chunk size
        total_flops = Bflops_per_entry [bnz] ;
    }

    //--------------------------------------------------------------------------
    // find the size of each slice
    //--------------------------------------------------------------------------

    nthreads = GB_nthreads (total_flops, chunk, nthreads_max) ;

    // Slice is a variable-length array; nthreads is bounded by nthreads_max
    int64_t Slice [nthreads+1] ;
    Slice [0] = 0 ;

    if (!fine_slice)
    {
        // slice B by the flops needed for each vector
        GB_pslice (Slice, Bflops, bnvec, nthreads) ;
        GB_FREE_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
    }
    else
    {
        // slice B by the flops needed for each entry
        GB_pslice (Slice, Bflops_per_entry, bnz, nthreads) ;
        GB_FREE_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
    }

    //--------------------------------------------------------------------------
    // discard the mask if it's too costly to use
    //--------------------------------------------------------------------------

    if (M != NULL && total_flops < GB_NNZ (M))
    {
        // The mask is too dense; discard it.  mask_applied will be false.
        M = NULL ;
    }

    //--------------------------------------------------------------------------
    // construct each slice of B
    //--------------------------------------------------------------------------

    // If the problem is small enough so that nthreads has been reduced to 1,
    // B is not sliced.

    GrB_Matrix Cslice [nthreads] ;
    GrB_Matrix Bslice [nthreads] ;
    for (int tid = 0 ; tid < nthreads ; tid++)
    {
        Cslice [tid] = NULL ;
        Bslice [tid] = NULL ;
    }

    // from here on, cleanup-on-error must free all slices
    #undef  GB_FREE_ALL
    #define GB_FREE_ALL                                     \
    {                                                       \
        for (int tid = 0 ; tid < nthreads ; tid++)          \
        {                                                   \
            GB_MATRIX_FREE (& (Cslice [tid])) ;             \
            GB_MATRIX_FREE (& (Bslice [tid])) ;             \
        }                                                   \
    }

    if (nthreads > 1)
    {
        if (fine_slice)
        {
            GB_OK (GB_fine_slice (B, nthreads, Slice, Bslice, Context)) ;
        }
        else
        {
            GB_OK (GB_slice (B, nthreads, Slice, Bslice, Context)) ;
        }
    }

    //--------------------------------------------------------------------------
    // select the method for each slice
    //--------------------------------------------------------------------------

    GrB_Desc_Value AxB_methods_used [nthreads] ;
    int64_t bjnz_max [nthreads] ;
    int Sauna_ids [nthreads] ;

    bool any_Gustavson = false ;
    #pragma omp parallel for num_threads(nthreads) schedule(static,1) \
        reduction(||:any_Gustavson)
    for (int tid = 0 ; tid < nthreads ; tid++)
    {
        GrB_Desc_Value thread_method_to_use ;
        GB_AxB_select (A, (nthreads == 1) ? B : Bslice [tid], semiring,
            AxB_method, &thread_method_to_use, &(bjnz_max [tid])) ;
        AxB_methods_used [tid] = thread_method_to_use ;

        // collect all thread-specific info
        any_Gustavson = any_Gustavson ||
            (thread_method_to_use == GxB_AxB_GUSTAVSON) ;
    }

    (*AxB_method_used) = AxB_methods_used [0] ;

    //--------------------------------------------------------------------------
    // acquire the Saunas for each thread that needs it
    //--------------------------------------------------------------------------

    if (any_Gustavson)
    {
        // at least one thread needs a Sauna
        GB_OK (GB_Sauna_acquire (nthreads, Sauna_ids, AxB_methods_used,
            Context)) ;
    }
    else
    {
        // no thread needs a Sauna
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            Sauna_ids [tid] = -2 ;
        }
    }

    //--------------------------------------------------------------------------
    // compute each slice of C = A*B with optional mask M
    //--------------------------------------------------------------------------

    // This is the only parallel region in which each thread allocates memory.
    // The memory space is not known until the thread determines the size of
    // its own output, in its analysis phase.  Note the "reduction(&&:ok)"
    // clause.  This is the only place where a clause like that appears in
    // SuiteSparse:GraphBLAS.  This could be removed if C=A*B were to be
    // computed with an inspector+executor style of algorithm.

    // B has been "sliced"; in MATLAB notation, B = [B0 B1 B2 ... B(t-1)] if
    // there are t threads.  Then each k thread computes its own Ck = A*Bk,
    // and the results are concatenated below, as C = [C0 C1 ... C(t-1)].

    // If a 'fine slice' was used for B, then C = C0+C1+...+C(t-1) must be
    // computed.

    // for all threads in parallel, with no synchronization except for these
    // boolean reductions:
    bool ok = true ;            // false if any thread's malloc or realloc fails
    bool panic = false ;        // true if any critical section fails
    bool allmask = true ;       // true if all threads apply the mask

    #pragma omp parallel for num_threads(nthreads) schedule(static,1) \
        reduction(&&:allmask) reduction(||:panic) \
        reduction(&&:ok)
    for (int tid = 0 ; tid < nthreads ; tid++)
    {
        // each thread allocates its output, using malloc and realloc
        bool thread_mask_applied = false ;
        GrB_Info thread_info = GB_AxB_saxpy_sequential (&(Cslice [tid]), M,
            Mask_comp, A, (nthreads == 1) ? B : Bslice [tid], semiring,
            flipxy, AxB_methods_used [tid], bjnz_max [tid],
            false, &thread_mask_applied, Sauna_ids [tid]) ;

        // collect all thread-specific info
        ok      = ok      && (thread_info == GrB_SUCCESS) ;
        allmask = allmask && (thread_mask_applied) ;
        panic   = panic   || (thread_info == GrB_PANIC) ;
    }

    //--------------------------------------------------------------------------
    // check error conditions
    //--------------------------------------------------------------------------

    // panic if a critical section fails
    if (panic) return (GrB_PANIC) ;

    // check the return info from all the threads
    if (!ok)
    {
        // out of memory
        if (any_Gustavson)
        {
            // at least one thread used a Sauna; free and release all Sauna
            // workspaces
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                int Sauna_id = Sauna_ids [tid] ;
                if (Sauna_id >= 0)
                {
                    GB_Sauna_free (Sauna_id) ;
                }
            }
            GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
        }
        GB_FREE_ALL ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // release the Saunas
    //--------------------------------------------------------------------------

    if (any_Gustavson)
    {
        // at least one thread used a Sauna
        GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
    }

    //--------------------------------------------------------------------------
    // check if all threads applied the mask
    //--------------------------------------------------------------------------

    // if all threads applied the mask to their slices, then GB_accum_mask does
    // not need to apply it to the concatenated C in GB_AxB_meta.  If just some
    // of them did, then GB_accum_mask needs to apply the mask again.
    (*mask_applied) = allmask ;

    //--------------------------------------------------------------------------
    // concatenate or sum the slices of C
    //--------------------------------------------------------------------------

    // Each slice Cslice [tid] has the same dimensions and type as C.  C is
    // stored by column.

    if (nthreads == 1)
    {
        // one thread, so only one slice: just copy Cslice[0] to C
        (*Chandle) = Cslice [0] ;
        Cslice [0] = NULL ;
    }
    else if (fine_slice)
    {
        // C = sum (Cslice [0..nthreads-1]).  Adjacent slices of C can share
        // columns, which must be summed.  Columns in the middle of each slice
        // are concatenated horizontally.
        GB_OK (GB_hcat_fine_slice (Chandle, nthreads, Cslice, semiring->add,
            Sauna_ids, Context)) ;
    }
    else
    {
        // C = [Cslice(0) Cslice(1) ... Cslice(nthreads-1)] concatenated
        // horizontally.  Each slice contains entries that appear in a unique
        // and contiguous subset of the columns of C.
        GB_OK (GB_hcat_slice (Chandle, nthreads, Cslice, Context)) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    ASSERT_OK (GB_check (*Chandle, "C for parallel A*B", GB0)) ;
    return (GrB_SUCCESS) ;
}
|
hnswalg.h | #pragma once
#include "visited_list_pool.h"
#include "hnswlib.h"
#include <atomic>
#include <random>
#include <stdlib.h>
#include <unordered_set>
#include <list>
#include <assert.h>
namespace hnswlib
{
// Minimal wall-clock stopwatch built on std::chrono::steady_clock.
class StopH
{
    // Reference instant captured at construction and on every reset().
    std::chrono::steady_clock::time_point time_begin = std::chrono::steady_clock::now();

public:
    StopH() = default;

    // Microseconds elapsed since construction (or the last reset()).
    float getElapsedTimeMicro()
    {
        const auto now = std::chrono::steady_clock::now();
        const auto elapsed_us = std::chrono::duration_cast<std::chrono::microseconds>(now - time_begin);
        return static_cast<float>(elapsed_us.count());
    }

    // Restart the stopwatch from "now".
    void reset()
    {
        time_begin = std::chrono::steady_clock::now();
    }
};
typedef unsigned int tableint;
typedef unsigned int linklistsizeint;
template <typename dist_t>
class HierarchicalNSW : public AlgorithmInterface<dist_t>
{
public:
static const tableint max_update_element_locks = 65536;
// NOTE(review): intentionally a no-op — no member (sizes, offsets,
// pointers, RNG seeds) is initialized here, so an index built with this
// constructor is presumably only valid after a subsequent load/init call;
// confirm intended usage by callers.
HierarchicalNSW(SpaceInterface<dist_t> *s)
{
}
// Construct by loading a previously saved index from 'location'.
// 'nmslib' is accepted for API compatibility but is not used here;
// 'max_elements' optionally enlarges the index capacity on load.
HierarchicalNSW(SpaceInterface<dist_t> *s, const std::string &location, bool nmslib = false, size_t max_elements = 0)
{
    loadIndex(location, s, max_elements);
}
// Build an empty index with capacity 'max_elements'.
//   M               : target out-degree on upper layers (maxM0_ = 2*M at level 0)
//   ef_construction : beam width during insertion (clamped to at least M)
//   random_seed     : seeds the level RNG; seed+1 seeds the update RNG
// Per-element level-0 layout: [links (count + maxM0_ ids)][vector data][label].
// NOTE(review): if a later malloc throws "Not enough memory", the earlier
// malloc'd blocks in this constructor are leaked — confirm acceptable.
HierarchicalNSW(SpaceInterface<dist_t> *s, size_t max_elements, size_t M = 16, size_t ef_construction = 200, size_t random_seed = 100) : link_list_locks_(max_elements), element_levels_(max_elements), link_list_update_locks_(max_update_element_locks)
{
    max_elements_ = max_elements;
    has_deletions_ = false;
    // distance function and element size come from the space descriptor
    data_size_ = s->get_data_size();
    fstdistfunc_ = s->get_dist_func();
    dist_func_param_ = s->get_dist_func_param();
    M_ = M;
    maxM_ = M_;
    maxM0_ = M_ * 2;
    ef_construction_ = std::max(ef_construction, M_);
    ef_ = 10;

    level_generator_.seed(random_seed);
    update_probability_generator_.seed(random_seed + 1);

    // byte offsets within each element's level-0 record
    size_links_level0_ = maxM0_ * sizeof(tableint) + sizeof(linklistsizeint);
    size_data_per_element_ = size_links_level0_ + data_size_ + sizeof(labeltype);
    offsetData_ = size_links_level0_;
    label_offset_ = size_links_level0_ + data_size_;
    offsetLevel0_ = 0;

    data_level0_memory_ = (char *)malloc(max_elements_ * size_data_per_element_);
    if (data_level0_memory_ == nullptr)
        throw std::runtime_error("Not enough memory");

    // extra level-0 style memory blocks, one per layer (fixed at 3 here)
    num_layer = 3;
    data_level0_memory_multi_layer = (char **)malloc(sizeof(char *) * num_layer);
    for (int i = 0; i < num_layer; i++)
    {
        data_level0_memory_multi_layer[i] = (char *)malloc(max_elements_ * size_data_per_element_);
        if (data_level0_memory_multi_layer[i] == nullptr)
            throw std::runtime_error("Not enough memory");
    }

    cur_element_count = 0;

    visited_list_pool_ = new VisitedListPool(1, max_elements);

    //initializations for special treatment of the first node
    enterpoint_node_ = -1;
    maxlevel_ = -1;

    // per-element pointers to upper-layer link lists (allocated lazily)
    linkLists_ = (char **)malloc(sizeof(void *) * max_elements_);
    if (linkLists_ == nullptr)
        throw std::runtime_error("Not enough memory: HierarchicalNSW failed to allocate linklists");
    size_links_per_element_ = maxM_ * sizeof(tableint) + sizeof(linklistsizeint);
    // mult_ controls the geometric level distribution: level = -ln(U) * mult_
    mult_ = 1 / log(1.0 * M_);
    revSize_ = 1.0 / mult_;
}
struct CompareByFirst
{
constexpr bool operator()(std::pair<dist_t, tableint> const &a,
std::pair<dist_t, tableint> const &b) const noexcept
{
return a.first < b.first;
}
};
// Release all index storage: the level-0 block, the lazily allocated
// per-element upper-layer link lists, the per-layer memory blocks, and
// the visited-list pool.
~HierarchicalNSW()
{
    free(data_level0_memory_);
    // upper-layer link lists exist only for elements whose level > 0
    for (tableint i = 0; i < cur_element_count; i++)
    {
        if (element_levels_[i] > 0)
            free(linkLists_[i]);
    }
    free(linkLists_);
    for (int i = 0; i < num_layer; i++)
    {
        free(data_level0_memory_multi_layer[i]);
    }
    free(data_level0_memory_multi_layer);
    delete visited_list_pool_;
}
size_t max_elements_;
size_t cur_element_count;
size_t size_data_per_element_;
size_t size_links_per_element_;
size_t M_;
size_t maxM_;
size_t maxM0_;
size_t ef_construction_;
size_t num_layer;
double mult_, revSize_;
int maxlevel_;
VisitedListPool *visited_list_pool_;
std::mutex cur_element_count_guard_;
std::vector<std::mutex> link_list_locks_;
// Locks to prevent race condition during update/insert of an element at same time.
// Note: Locks for additions can also be used to prevent this race condition if the querying of KNN is not exposed along with update/inserts i.e multithread insert/update/query in parallel.
std::vector<std::mutex> link_list_update_locks_;
tableint enterpoint_node_;
size_t size_links_level0_;
size_t offsetData_, offsetLevel0_;
char *data_level0_memory_;
char **data_level0_memory_multi_layer;
char **linkLists_;
std::vector<int> element_levels_;
size_t data_size_;
bool has_deletions_;
size_t label_offset_;
DISTFUNC<dist_t> fstdistfunc_;
void *dist_func_param_;
std::unordered_map<labeltype, tableint> label_lookup_;
std::default_random_engine level_generator_;
std::default_random_engine update_probability_generator_;
// Read the user-visible label stored after the vector data of element
// 'internal_id' in the level-0 memory block.
inline labeltype getExternalLabel(tableint internal_id) const
{
    labeltype return_label;
    const char *src = data_level0_memory_ + internal_id * size_data_per_element_ + label_offset_;
    memcpy(&return_label, src, sizeof(labeltype));
    return return_label;
}
// Same as getExternalLabel(id), but reading from an explicitly supplied
// level-0 memory block (the parameter shadows the member of the same name).
inline labeltype getExternalLabel(tableint internal_id, char *data_level0_memory_) const
{
    labeltype return_label;
    const char *src = data_level0_memory_ + internal_id * size_data_per_element_ + label_offset_;
    memcpy(&return_label, src, sizeof(labeltype));
    return return_label;
}
// Overwrite the stored label of element 'internal_id' in the level-0 block.
// (const because it mutates heap storage, not the object's members.)
inline void setExternalLabel(tableint internal_id, labeltype label) const
{
    char *dst = data_level0_memory_ + internal_id * size_data_per_element_ + label_offset_;
    memcpy(dst, &label, sizeof(labeltype));
}
// Pointer to the label slot of element 'internal_id' inside the level-0 block.
inline labeltype *getExternalLabeLp(tableint internal_id) const
{
    char *element_base = data_level0_memory_ + internal_id * size_data_per_element_;
    return (labeltype *)(element_base + label_offset_);
}
// Same as getExternalLabeLp(id), but against an explicitly supplied
// level-0 memory block (the parameter shadows the member of the same name).
inline labeltype *getExternalLabeLp(tableint internal_id, char *data_level0_memory_) const
{
    char *element_base = data_level0_memory_ + internal_id * size_data_per_element_;
    return (labeltype *)(element_base + label_offset_);
}
// Pointer to the raw vector data of element 'internal_id' in the level-0 block.
inline char *getDataByInternalId(tableint internal_id) const
{
    char *element_base = data_level0_memory_ + internal_id * size_data_per_element_;
    return element_base + offsetData_;
}
// Same as getDataByInternalId(id), but against an explicitly supplied
// level-0 memory block (the parameter shadows the member of the same name).
inline char *getDataByInternalId(tableint internal_id, char *data_level0_memory_) const
{
    char *element_base = data_level0_memory_ + internal_id * size_data_per_element_;
    return element_base + offsetData_;
}
// Draw a random layer for a new element: level = floor(-ln(U) * reverse_size)
// with U ~ Uniform(0,1), i.e. a geometric-like level distribution.
int getRandomLevel(double reverse_size)
{
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    const double level = -log(uniform(level_generator_)) * reverse_size;
    return (int)level;
}
// Greedy best-first search on a single graph layer, starting from entry
// point 'ep_id', collecting up to ef_construction_ nearest candidates to
// 'data_point'.  Returns a max-heap of (distance, id) pairs, so the worst
// of the kept candidates is on top.  Takes each node's link-list lock
// while reading its neighbors, so it is safe against concurrent inserts.
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
searchBaseLayer(tableint ep_id, const void *data_point, int layer)
{
    VisitedList *vl = visited_list_pool_->getFreeVisitedList();
    vl_type *visited_array = vl->mass; // stores the elements already visited
    vl_type visited_array_tag = vl->curV;

    // top_candidates: best ef_construction_ results so far (max-heap).
    // candidateSet: frontier to expand, keyed by negated distance so the
    // nearest unexpanded node is on top.
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidateSet;

    dist_t lowerBound;
    if (!isMarkedDeleted(ep_id))
    {
        dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id), dist_func_param_);
        top_candidates.emplace(dist, ep_id); // push ep_id into top_candidates ordered by dist (largest on top)
        lowerBound = dist;
        candidateSet.emplace(-dist, ep_id);
    }
    else
    {
        // deleted entry point: still expand from it, but never return it
        lowerBound = std::numeric_limits<dist_t>::max();
        candidateSet.emplace(-lowerBound, ep_id);
    }
    visited_array[ep_id] = visited_array_tag;

    while (!candidateSet.empty())
    {
        std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
        // stop when the closest frontier node is already worse than the
        // worst kept candidate
        if ((-curr_el_pair.first) > lowerBound)
        {
            break;
        }
        candidateSet.pop();

        tableint curNodeNum = curr_el_pair.second;

        // guard this node's adjacency list against concurrent modification
        std::unique_lock<std::mutex> lock(link_list_locks_[curNodeNum]);

        int *data; // = (int *)(linkList0_ + curNodeNum * size_links_per_element0_);
        if (layer == 0)
        {
            data = (int *)get_linklist0(curNodeNum);
        }
        else
        {
            data = (int *)get_linklist(curNodeNum, layer);
            //                    data = (int *) (linkLists_[curNodeNum] + (layer - 1) * size_links_per_element_);
        }
        size_t size = getListCount((linklistsizeint *)data);
        tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
        // prefetch visited flags and vector data of the first neighbors
        _mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
        _mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(*datal), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(*(datal + 1)), _MM_HINT_T0);
#endif

        for (size_t j = 0; j < size; j++)
        {
            tableint candidate_id = *(datal + j);
            //                    if (candidate_id == 0) continue;
#ifdef USE_SSE
            // prefetch the next neighbor while processing this one
            _mm_prefetch((char *)(visited_array + *(datal + j + 1)), _MM_HINT_T0);
            _mm_prefetch(getDataByInternalId(*(datal + j + 1)), _MM_HINT_T0);
#endif
            if (visited_array[candidate_id] == visited_array_tag)
                continue;
            visited_array[candidate_id] = visited_array_tag;
            char *currObj1 = (getDataByInternalId(candidate_id));

            dist_t dist1 = fstdistfunc_(data_point, currObj1, dist_func_param_);
            // keep the neighbor if the result set is not full yet, or it
            // improves on the current worst kept distance
            if (top_candidates.size() < ef_construction_ || lowerBound > dist1)
            {
                candidateSet.emplace(-dist1, candidate_id);
#ifdef USE_SSE
                _mm_prefetch(getDataByInternalId(candidateSet.top().second), _MM_HINT_T0);
#endif

                if (!isMarkedDeleted(candidate_id))
                    top_candidates.emplace(dist1, candidate_id);

                if (top_candidates.size() > ef_construction_)
                    top_candidates.pop();

                if (!top_candidates.empty())
                    lowerBound = top_candidates.top().first;
            }
        }
    }
    visited_list_pool_->releaseVisitedList(vl);

    return top_candidates;
}
// Variant of searchBaseLayer where graph ids are logical: every access to
// vector data goes through mapping_id[graph_id] to find the physical slot
// in the level-0 block.  Visited flags, locks, and link lists still use
// the logical (graph) ids.  Otherwise identical to searchBaseLayer.
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
dmd_hnsw_searchBaseLayer(tableint ep_id, const void *data_point, int layer, std::vector<int> mapping_id)
{
    VisitedList *vl = visited_list_pool_->getFreeVisitedList();
    vl_type *visited_array = vl->mass; // visited flags, indexed by logical id
    vl_type visited_array_tag = vl->curV;

    // max-heap of results / min-frontier via negated distances (see
    // searchBaseLayer for the full protocol)
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidateSet;

    dist_t lowerBound;
    if (!isMarkedDeleted(ep_id))
    {
        // note: distance uses the mapped physical slot of ep_id
        dist_t dist = fstdistfunc_(data_point, getDataByInternalId(mapping_id[ep_id]), dist_func_param_);
        top_candidates.emplace(dist, ep_id);
        lowerBound = dist;
        candidateSet.emplace(-dist, ep_id);
    }
    else
    {
        lowerBound = std::numeric_limits<dist_t>::max();
        candidateSet.emplace(-lowerBound, ep_id);
    }
    visited_array[ep_id] = visited_array_tag;

    while (!candidateSet.empty())
    {
        std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
        // stop once the nearest frontier node cannot improve the results
        if ((-curr_el_pair.first) > lowerBound)
        {
            break;
        }
        candidateSet.pop();

        tableint curNodeNum = curr_el_pair.second;

        std::unique_lock<std::mutex> lock(link_list_locks_[curNodeNum]);

        int *data; // = (int *)(linkList0_ + curNodeNum * size_links_per_element0_);
        if (layer == 0)
        {
            data = (int *)get_linklist0(curNodeNum);
        }
        else
        {
            data = (int *)get_linklist(curNodeNum, layer);
            //                    data = (int *) (linkLists_[curNodeNum] + (layer - 1) * size_links_per_element_);
        }
        size_t size = getListCount((linklistsizeint *)data);
        tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
        // prefetch visited flags (logical ids) and mapped vector data
        _mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
        _mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(mapping_id[*datal]), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(mapping_id[*(datal + 1)]), _MM_HINT_T0);
#endif

        for (size_t j = 0; j < size; j++)
        {
            tableint candidate_id = *(datal + j);
            //                    if (candidate_id == 0) continue;
#ifdef USE_SSE
            _mm_prefetch((char *)(visited_array + *(datal + j + 1)), _MM_HINT_T0);
            _mm_prefetch(getDataByInternalId(mapping_id[*(datal + j + 1)]), _MM_HINT_T0);
#endif
            if (visited_array[candidate_id] == visited_array_tag)
                continue;
            visited_array[candidate_id] = visited_array_tag;
            // physical data slot of this logical neighbor
            char *currObj1 = (getDataByInternalId(mapping_id[candidate_id]));

            dist_t dist1 = fstdistfunc_(data_point, currObj1, dist_func_param_);
            if (top_candidates.size() < ef_construction_ || lowerBound > dist1)
            {
                candidateSet.emplace(-dist1, candidate_id);
#ifdef USE_SSE
                _mm_prefetch(getDataByInternalId(mapping_id[candidateSet.top().second]), _MM_HINT_T0);
#endif

                if (!isMarkedDeleted(candidate_id))
                    top_candidates.emplace(dist1, candidate_id);

                if (top_candidates.size() > ef_construction_)
                    top_candidates.pop();

                if (!top_candidates.empty())
                    lowerBound = top_candidates.top().first;
            }
        }
    }
    visited_list_pool_->releaseVisitedList(vl);

    return top_candidates;
}
// Greedy best-first search of one graph layer using an alternative level-0
// data block `data_layer0` instead of the member data_level0_memory_.
// Returns up to ef_construction_ candidates nearest to `data_point` as a
// max-heap on distance.
// NOTE(review): for layer > 0 the links still come from the shared
// linkLists_ member — only level-0 storage is overridden; confirm callers
// keep linkLists_ consistent with data_layer0.
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
multi_layer_searchBaseLayer(tableint ep_id, const void *data_point, int layer, char *data_layer0)
{
VisitedList *vl = visited_list_pool_->getFreeVisitedList();
vl_type *visited_array = vl->mass;
vl_type visited_array_tag = vl->curV;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidateSet;
dist_t lowerBound;
// Seed with the entry point; a deleted entry point is expanded but never
// returned, and the bound starts at +infinity.
if (!isMarkedDeleted(ep_id, data_layer0))
{
dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id, data_layer0), dist_func_param_);
top_candidates.emplace(dist, ep_id);
lowerBound = dist;
candidateSet.emplace(-dist, ep_id);
}
else
{
lowerBound = std::numeric_limits<dist_t>::max();
candidateSet.emplace(-lowerBound, ep_id);
}
visited_array[ep_id] = visited_array_tag;
while (!candidateSet.empty())
{
// candidateSet holds negated distances: top() is the closest unexpanded
// node; stop once it cannot beat the current worst result.
std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
if ((-curr_el_pair.first) > lowerBound)
{
break;
}
candidateSet.pop();
tableint curNodeNum = curr_el_pair.second;
// Lock this node's adjacency list while it is being read.
std::unique_lock<std::mutex> lock(link_list_locks_[curNodeNum]);
int *data; // = (int *)(linkList0_ + curNodeNum * size_links_per_element0_);
if (layer == 0)
{
data = (int *)get_linklist0(curNodeNum, data_layer0);
}
else
{
data = (int *)get_linklist(curNodeNum, layer);
// data = (int *) (linkLists_[curNodeNum] + (layer - 1) * size_links_per_element_);
}
size_t size = getListCount((linklistsizeint *)data);
// Neighbor ids start immediately after the count word.
tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
_mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
_mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*datal, data_layer0), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*(datal + 1), data_layer0), _MM_HINT_T0);
#endif
for (size_t j = 0; j < size; j++)
{
tableint candidate_id = *(datal + j);
// if (candidate_id == 0) continue;
#ifdef USE_SSE
// Prefetch the next neighbor one iteration ahead.
_mm_prefetch((char *)(visited_array + *(datal + j + 1)), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*(datal + j + 1), data_layer0), _MM_HINT_T0);
#endif
if (visited_array[candidate_id] == visited_array_tag)
continue;
visited_array[candidate_id] = visited_array_tag;
char *currObj1 = (getDataByInternalId(candidate_id, data_layer0));
dist_t dist1 = fstdistfunc_(data_point, currObj1, dist_func_param_);
// Accept the candidate while the result heap is not full, or when it
// improves on the current worst kept distance.
if (top_candidates.size() < ef_construction_ || lowerBound > dist1)
{
candidateSet.emplace(-dist1, candidate_id);
#ifdef USE_SSE
_mm_prefetch(getDataByInternalId(candidateSet.top().second, data_layer0), _MM_HINT_T0);
#endif
if (!isMarkedDeleted(candidate_id, data_layer0))
top_candidates.emplace(dist1, candidate_id);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
if (!top_candidates.empty())
lowerBound = top_candidates.top().first;
}
}
}
visited_list_pool_->releaseVisitedList(vl);
return top_candidates;
}
// Greedy best-first search of one graph layer combining the DMD id-mapping
// (`mapping_id`: graph id -> data slot) with an alternative level-0 data
// block (`data_layer0`).
//
// Fixes vs. the previous revision: the leftover debug printf() calls inside
// the hot search loop were removed, and `mapping_id` is taken by const
// reference instead of by value (no per-call copy of the whole table).
//
// @param ep_id       entry point, graph-internal id
// @param data_point  query vector
// @param layer       layer to search (0 uses get_linklist0 on data_layer0)
// @param data_layer0 level-0 storage block to read vectors/flags from
// @param mapping_id  graph id -> data slot table (read-only)
// @return up to ef_construction_ nearest candidates as a max-heap on distance
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
dmd_hnsw_multi_layer_searchBaseLayer(tableint ep_id, const void *data_point, int layer, char *data_layer0, const std::vector<int> &mapping_id)
{
    VisitedList *vl = visited_list_pool_->getFreeVisitedList();
    vl_type *visited_array = vl->mass;
    vl_type visited_array_tag = vl->curV;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidateSet;
    dist_t lowerBound;
    // Seed with the entry point; a deleted entry point is expanded but never
    // returned, and the bound starts at +infinity.
    if (!isMarkedDeleted(ep_id, data_layer0))
    {
        dist_t dist = fstdistfunc_(data_point, getDataByInternalId(mapping_id[ep_id], data_layer0), dist_func_param_);
        top_candidates.emplace(dist, ep_id);
        lowerBound = dist;
        candidateSet.emplace(-dist, ep_id);
    }
    else
    {
        lowerBound = std::numeric_limits<dist_t>::max();
        candidateSet.emplace(-lowerBound, ep_id);
    }
    visited_array[ep_id] = visited_array_tag;
    while (!candidateSet.empty())
    {
        // candidateSet holds negated distances: top() is the closest
        // unexpanded node; stop once it cannot improve the results.
        std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
        if ((-curr_el_pair.first) > lowerBound)
        {
            break;
        }
        candidateSet.pop();
        tableint curNodeNum = curr_el_pair.second;
        std::unique_lock<std::mutex> lock(link_list_locks_[curNodeNum]);
        int *data;
        if (layer == 0)
        {
            // Level 0 links live in data_layer0 and are addressed by data slot.
            data = (int *)get_linklist0(mapping_id[curNodeNum], data_layer0);
        }
        else
        {
            // Upper-layer links stay in the shared linkLists_ member.
            data = (int *)get_linklist(curNodeNum, layer);
        }
        size_t size = getListCount((linklistsizeint *)data);
        tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
        _mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
        _mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(mapping_id[*datal], data_layer0), _MM_HINT_T0);
        _mm_prefetch(getDataByInternalId(mapping_id[*(datal + 1)], data_layer0), _MM_HINT_T0);
#endif
        for (size_t j = 0; j < size; j++)
        {
            tableint candidate_id = *(datal + j);
#ifdef USE_SSE
            // NOTE(review): on the last iteration this indexes mapping_id with
            // *(datal + size), one slot past the link list — inherited
            // behavior, confirm that slot is always a valid graph id.
            _mm_prefetch((char *)(visited_array + *(datal + j + 1)), _MM_HINT_T0);
            _mm_prefetch(getDataByInternalId(mapping_id[*(datal + j + 1)], data_layer0), _MM_HINT_T0);
#endif
            if (visited_array[candidate_id] == visited_array_tag)
                continue;
            visited_array[candidate_id] = visited_array_tag;
            char *currObj1 = (getDataByInternalId(mapping_id[candidate_id], data_layer0));
            dist_t dist1 = fstdistfunc_(data_point, currObj1, dist_func_param_);
            if (top_candidates.size() < ef_construction_ || lowerBound > dist1)
            {
                candidateSet.emplace(-dist1, candidate_id);
#ifdef USE_SSE
                _mm_prefetch(getDataByInternalId(mapping_id[candidateSet.top().second], data_layer0), _MM_HINT_T0);
#endif
                if (!isMarkedDeleted(candidate_id, data_layer0))
                    top_candidates.emplace(dist1, candidate_id);
                if (top_candidates.size() > ef_construction_)
                    top_candidates.pop();
                if (!top_candidates.empty())
                    lowerBound = top_candidates.top().first;
            }
        }
    }
    visited_list_pool_->releaseVisitedList(vl);
    return top_candidates;
}
// Greedy best-first search of one graph layer, intended for parallel index
// construction.
// NOTE(review): `vec_start` is currently UNUSED — the candidate filter
// `candidate_id > vec_start` is commented out below, making this function
// behave exactly like the plain searchBaseLayer(); confirm whether the
// filter should be re-enabled.
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
parallel_searchBaseLayer(tableint ep_id, const void *data_point, int layer, int vec_start)
{
VisitedList *vl = visited_list_pool_->getFreeVisitedList();
vl_type *visited_array = vl->mass;
vl_type visited_array_tag = vl->curV;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidateSet;
dist_t lowerBound;
// Seed with the entry point; a deleted entry point is expanded but never
// returned, and the bound starts at +infinity.
if (!isMarkedDeleted(ep_id))
{
dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id), dist_func_param_);
top_candidates.emplace(dist, ep_id);
lowerBound = dist;
candidateSet.emplace(-dist, ep_id);
}
else
{
lowerBound = std::numeric_limits<dist_t>::max();
candidateSet.emplace(-lowerBound, ep_id);
}
visited_array[ep_id] = visited_array_tag;
while (!candidateSet.empty())
{
// candidateSet holds negated distances: top() is the closest unexpanded
// node; stop once it cannot beat the current worst kept result.
std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
if ((-curr_el_pair.first) > lowerBound)
{
break;
}
candidateSet.pop();
tableint curNodeNum = curr_el_pair.second;
// Lock this node's adjacency list while it is being read.
std::unique_lock<std::mutex> lock(link_list_locks_[curNodeNum]);
int *data; // = (int *)(linkList0_ + curNodeNum * size_links_per_element0_);
if (layer == 0)
{
data = (int *)get_linklist0(curNodeNum);
}
else
{
data = (int *)get_linklist(curNodeNum, layer);
// data = (int *) (linkLists_[curNodeNum] + (layer - 1) * size_links_per_element_);
}
size_t size = getListCount((linklistsizeint *)data);
// Neighbor ids start immediately after the count word.
tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
_mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
_mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*datal), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*(datal + 1)), _MM_HINT_T0);
#endif
for (size_t j = 0; j < size; j++)
{
tableint candidate_id = *(datal + j);
// if (candidate_id == 0) continue;
#ifdef USE_SSE
// Prefetch the next neighbor one iteration ahead.
_mm_prefetch((char *)(visited_array + *(datal + j + 1)), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*(datal + j + 1)), _MM_HINT_T0);
#endif
//if (visited_array[candidate_id] == visited_array_tag || candidate_id > vec_start)
if (visited_array[candidate_id] == visited_array_tag)
continue;
visited_array[candidate_id] = visited_array_tag;
char *currObj1 = (getDataByInternalId(candidate_id));
dist_t dist1 = fstdistfunc_(data_point, currObj1, dist_func_param_);
// Accept while the result heap is not full, or on improvement.
if (top_candidates.size() < ef_construction_ || lowerBound > dist1)
{
candidateSet.emplace(-dist1, candidate_id);
#ifdef USE_SSE
_mm_prefetch(getDataByInternalId(candidateSet.top().second), _MM_HINT_T0);
#endif
if (!isMarkedDeleted(candidate_id))
top_candidates.emplace(dist1, candidate_id);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
if (!top_candidates.empty())
lowerBound = top_candidates.top().first;
}
}
}
visited_list_pool_->releaseVisitedList(vl);
return top_candidates;
}
// Search statistics, updated by searchBaseLayerST when it is instantiated
// with collect_metrics = true; atomic (and mutable) because searches are
// const member functions that may run concurrently.
mutable std::atomic<long> metric_distance_computations;
mutable std::atomic<long> metric_hops;
// Single-threaded query-time search of the base layer (level 0).
//
// Template flags:
//   has_deletions   - when false, the isMarkedDeleted checks compile away.
//   collect_metrics - when true, hop and distance-computation counters are
//                     accumulated into the atomic metric_* members.
//
// Returns up to `ef` candidates nearest to `data_point` as a max-heap on
// distance. Unlike the construction-time searches, no per-node locks are
// taken here.
template <bool has_deletions, bool collect_metrics = false>
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
searchBaseLayerST(tableint ep_id, const void *data_point, size_t ef) const
{
VisitedList *vl = visited_list_pool_->getFreeVisitedList();
vl_type *visited_array = vl->mass;
vl_type visited_array_tag = vl->curV;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidate_set;
dist_t lowerBound;
// Seed with the entry point; a deleted entry point is expanded but never
// returned, and the bound starts at +infinity.
if (!has_deletions || !isMarkedDeleted(ep_id))
{
dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id), dist_func_param_);
lowerBound = dist;
top_candidates.emplace(dist, ep_id);
candidate_set.emplace(-dist, ep_id);
}
else
{
lowerBound = std::numeric_limits<dist_t>::max();
candidate_set.emplace(-lowerBound, ep_id);
}
visited_array[ep_id] = visited_array_tag;
while (!candidate_set.empty())
{
// candidate_set holds negated distances: top() is the closest
// unexpanded node; stop once it cannot improve the results.
std::pair<dist_t, tableint> current_node_pair = candidate_set.top();
if ((-current_node_pair.first) > lowerBound)
{
break;
}
candidate_set.pop();
tableint current_node_id = current_node_pair.second;
int *data = (int *)get_linklist0(current_node_id);
size_t size = getListCount((linklistsizeint *)data);
// bool cur_node_deleted = isMarkedDeleted(current_node_id);
if (collect_metrics)
{
metric_hops++;
metric_distance_computations += size;
}
#ifdef USE_SSE
_mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
_mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
_mm_prefetch(data_level0_memory_ + (*(data + 1)) * size_data_per_element_ + offsetData_, _MM_HINT_T0);
_mm_prefetch((char *)(data + 2), _MM_HINT_T0);
#endif
// Neighbor ids are stored at data[1..size]; data[0] is the count word.
for (size_t j = 1; j <= size; j++)
{
int candidate_id = *(data + j);
// if (candidate_id == 0) continue;
#ifdef USE_SSE
_mm_prefetch((char *)(visited_array + *(data + j + 1)), _MM_HINT_T0);
_mm_prefetch(data_level0_memory_ + (*(data + j + 1)) * size_data_per_element_ + offsetData_,
_MM_HINT_T0); ////////////
#endif
if (!(visited_array[candidate_id] == visited_array_tag))
{
visited_array[candidate_id] = visited_array_tag;
char *currObj1 = (getDataByInternalId(candidate_id));
dist_t dist = fstdistfunc_(data_point, currObj1, dist_func_param_);
// Accept while the heap is not yet `ef` deep, or on improvement.
if (top_candidates.size() < ef || lowerBound > dist)
{
candidate_set.emplace(-dist, candidate_id);
#ifdef USE_SSE
_mm_prefetch(data_level0_memory_ + candidate_set.top().second * size_data_per_element_ +
offsetLevel0_, ///////////
_MM_HINT_T0); ////////////////////////
#endif
if (!has_deletions || !isMarkedDeleted(candidate_id))
top_candidates.emplace(dist, candidate_id);
if (top_candidates.size() > ef)
top_candidates.pop();
if (!top_candidates.empty())
lowerBound = top_candidates.top().first;
}
}
}
}
visited_list_pool_->releaseVisitedList(vl);
return top_candidates;
}
// Single-threaded query-time search of the base layer, reading vectors,
// links and delete flags from an alternative level-0 block `data_layer0`
// instead of the member data_level0_memory_.
//
// Template flags:
//   has_deletions   - when false, isMarkedDeleted checks compile away.
//   collect_metrics - when true, hop/distance counters are accumulated
//                     into the atomic metric_* members.
//
// Fix vs. the previous revision: the dead local `num_step` (its only
// increment was commented out) has been removed.
//
// @return up to `ef` candidates nearest to `data_point`, max-heap on distance
template <bool has_deletions, bool collect_metrics = false>
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
multi_layer_searchBaseLayerST(tableint ep_id, const void *data_point, size_t ef, char *data_layer0) const
{
    VisitedList *vl = visited_list_pool_->getFreeVisitedList();
    vl_type *visited_array = vl->mass;
    vl_type visited_array_tag = vl->curV;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidate_set;
    dist_t lowerBound;
    // Seed with the entry point; a deleted entry point is expanded but never
    // returned, and the bound starts at +infinity.
    if (!has_deletions || !isMarkedDeleted(ep_id, data_layer0))
    {
        dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id, data_layer0), dist_func_param_);
        lowerBound = dist;
        top_candidates.emplace(dist, ep_id);
        candidate_set.emplace(-dist, ep_id);
    }
    else
    {
        lowerBound = std::numeric_limits<dist_t>::max();
        candidate_set.emplace(-lowerBound, ep_id);
    }
    visited_array[ep_id] = visited_array_tag;
    while (!candidate_set.empty())
    {
        // candidate_set holds negated distances: top() is the closest
        // unexpanded node; stop once it cannot improve the results.
        std::pair<dist_t, tableint> current_node_pair = candidate_set.top();
        if ((-current_node_pair.first) > lowerBound)
        {
            break;
        }
        candidate_set.pop();
        tableint current_node_id = current_node_pair.second;
        int *data = (int *)get_linklist0(current_node_id, data_layer0);
        size_t size = getListCount((linklistsizeint *)data);
        if (collect_metrics)
        {
            metric_hops++;
            metric_distance_computations += size;
        }
#ifdef USE_SSE
        _mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
        _mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
        _mm_prefetch(data_layer0 + (*(data + 1)) * size_data_per_element_ + offsetData_, _MM_HINT_T0);
        _mm_prefetch((char *)(data + 2), _MM_HINT_T0);
#endif
        // Neighbor ids are stored at data[1..size]; data[0] is the count word.
        for (size_t j = 1; j <= size; j++)
        {
            int candidate_id = *(data + j);
#ifdef USE_SSE
            _mm_prefetch((char *)(visited_array + *(data + j + 1)), _MM_HINT_T0);
            _mm_prefetch(data_layer0 + (*(data + j + 1)) * size_data_per_element_ + offsetData_,
                         _MM_HINT_T0);
#endif
            if (!(visited_array[candidate_id] == visited_array_tag))
            {
                visited_array[candidate_id] = visited_array_tag;
                char *currObj1 = (getDataByInternalId(candidate_id, data_layer0));
                dist_t dist = fstdistfunc_(data_point, currObj1, dist_func_param_);
                // Accept while the heap is not yet `ef` deep, or on improvement.
                if (top_candidates.size() < ef || lowerBound > dist)
                {
                    candidate_set.emplace(-dist, candidate_id);
#ifdef USE_SSE
                    _mm_prefetch(data_layer0 + candidate_set.top().second * size_data_per_element_ +
                                     offsetLevel0_,
                                 _MM_HINT_T0);
#endif
                    if (!has_deletions || !isMarkedDeleted(candidate_id, data_layer0))
                        top_candidates.emplace(dist, candidate_id);
                    if (top_candidates.size() > ef)
                        top_candidates.pop();
                    if (!top_candidates.empty())
                        lowerBound = top_candidates.top().first;
                }
            }
        }
    }
    visited_list_pool_->releaseVisitedList(vl);
    return top_candidates;
}
// Instrumented variant of multi_layer_searchBaseLayerST for experiments:
// every accepted candidate increments *step and logs its distance to `fp`.
//
// Fix vs. the previous revision: the log line used "%d" for `dist`, which
// is undefined behavior when dist_t is a floating-point type (the common
// instantiation). The distance is now cast to double and printed with %g,
// which is also correct for integral dist_t.
//
// @param step out-parameter: running count of accepted candidates
// @param fp   open log file the step trace is appended to
// @return up to `ef` candidates nearest to `data_point`, max-heap on distance
template <bool has_deletions, bool collect_metrics = false>
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>
test_multi_layer_searchBaseLayerST(tableint ep_id, const void *data_point, size_t ef, char *data_layer0, int *step, FILE *fp) const
{
    VisitedList *vl = visited_list_pool_->getFreeVisitedList();
    vl_type *visited_array = vl->mass;
    vl_type visited_array_tag = vl->curV;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidate_set;
    dist_t lowerBound;
    // Seed with the entry point; a deleted entry point is expanded but never
    // returned, and the bound starts at +infinity.
    if (!has_deletions || !isMarkedDeleted(ep_id, data_layer0))
    {
        dist_t dist = fstdistfunc_(data_point, getDataByInternalId(ep_id, data_layer0), dist_func_param_);
        lowerBound = dist;
        top_candidates.emplace(dist, ep_id);
        candidate_set.emplace(-dist, ep_id);
    }
    else
    {
        lowerBound = std::numeric_limits<dist_t>::max();
        candidate_set.emplace(-lowerBound, ep_id);
    }
    visited_array[ep_id] = visited_array_tag;
    while (!candidate_set.empty())
    {
        // candidate_set holds negated distances: top() is the closest
        // unexpanded node; stop once it cannot improve the results.
        std::pair<dist_t, tableint> current_node_pair = candidate_set.top();
        if ((-current_node_pair.first) > lowerBound)
        {
            break;
        }
        candidate_set.pop();
        tableint current_node_id = current_node_pair.second;
        int *data = (int *)get_linklist0(current_node_id, data_layer0);
        size_t size = getListCount((linklistsizeint *)data);
        if (collect_metrics)
        {
            metric_hops++;
            metric_distance_computations += size;
        }
#ifdef USE_SSE
        _mm_prefetch((char *)(visited_array + *(data + 1)), _MM_HINT_T0);
        _mm_prefetch((char *)(visited_array + *(data + 1) + 64), _MM_HINT_T0);
        _mm_prefetch(data_layer0 + (*(data + 1)) * size_data_per_element_ + offsetData_, _MM_HINT_T0);
        _mm_prefetch((char *)(data + 2), _MM_HINT_T0);
#endif
        // Neighbor ids are stored at data[1..size]; data[0] is the count word.
        for (size_t j = 1; j <= size; j++)
        {
            int candidate_id = *(data + j);
#ifdef USE_SSE
            _mm_prefetch((char *)(visited_array + *(data + j + 1)), _MM_HINT_T0);
            _mm_prefetch(data_layer0 + (*(data + j + 1)) * size_data_per_element_ + offsetData_,
                         _MM_HINT_T0);
#endif
            if (!(visited_array[candidate_id] == visited_array_tag))
            {
                visited_array[candidate_id] = visited_array_tag;
                char *currObj1 = (getDataByInternalId(candidate_id, data_layer0));
                dist_t dist = fstdistfunc_(data_point, currObj1, dist_func_param_);
                if (top_candidates.size() < ef || lowerBound > dist)
                {
                    candidate_set.emplace(-dist, candidate_id);
                    (*step)++;
                    // Cast to double so the format is valid for both floating
                    // and integral dist_t instantiations.
                    fprintf(fp, "step%d: %g\n", *step, (double)dist);
#ifdef USE_SSE
                    _mm_prefetch(data_layer0 + candidate_set.top().second * size_data_per_element_ +
                                     offsetLevel0_,
                                 _MM_HINT_T0);
#endif
                    if (!has_deletions || !isMarkedDeleted(candidate_id, data_layer0))
                        top_candidates.emplace(dist, candidate_id);
                    if (top_candidates.size() > ef)
                        top_candidates.pop();
                    if (!top_candidates.empty())
                        lowerBound = top_candidates.top().first;
                }
            }
        }
    }
    visited_list_pool_->releaseVisitedList(vl);
    return top_candidates;
}
// Prune `top_candidates` down to at most M neighbors using the HNSW
// diversity heuristic (Algorithm 4 of the HNSW paper): walking candidates
// closest-first, a candidate is kept only if it is closer to the query than
// to every already-kept neighbor. Result is written back into
// top_candidates (as a max-heap on distance).
//
// Fix vs. the previous revision: a stray empty statement (`;`) after the
// distance computation was removed.
void getNeighborsByHeuristic2(
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
    const size_t M)
{
    // Nothing to prune if we already have fewer than M candidates.
    if (top_candidates.size() < M)
    {
        return;
    }
    // Re-key on negated distance so queue_closest pops closest-first.
    std::priority_queue<std::pair<dist_t, tableint>> queue_closest;
    std::vector<std::pair<dist_t, tableint>> return_list;
    while (top_candidates.size() > 0)
    {
        queue_closest.emplace(-top_candidates.top().first, top_candidates.top().second);
        top_candidates.pop();
    }
    while (queue_closest.size())
    {
        if (return_list.size() >= M)
            break;
        std::pair<dist_t, tableint> curent_pair = queue_closest.top();
        dist_t dist_to_query = -curent_pair.first;
        queue_closest.pop();
        bool good = true;
        // Reject the candidate if any already-kept neighbor is closer to it
        // than the candidate is to the query (diversity criterion).
        for (std::pair<dist_t, tableint> second_pair : return_list)
        {
            dist_t curdist =
                fstdistfunc_(getDataByInternalId(second_pair.second),
                             getDataByInternalId(curent_pair.second),
                             dist_func_param_);
            if (curdist < dist_to_query)
            {
                good = false;
                break;
            }
        }
        if (good)
        {
            return_list.push_back(curent_pair);
        }
    }
    // Restore positive distances while refilling the caller's heap.
    for (std::pair<dist_t, tableint> curent_pair : return_list)
    {
        top_candidates.emplace(-curent_pair.first, curent_pair.second);
    }
}
// DMD variant of getNeighborsByHeuristic2(): identical pruning logic, but
// vector data for each candidate id is fetched through the graph-id ->
// data-slot table `mapping_id`.
//
// Fixes vs. the previous revision: `mapping_id` is taken by const reference
// (no per-call copy of the whole table) and a stray empty statement (`;`)
// after the distance computation was removed.
void dmd_hnsw_getNeighborsByHeuristic2(
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
    const size_t M, const std::vector<int> &mapping_id)
{
    // Nothing to prune if we already have fewer than M candidates.
    if (top_candidates.size() < M)
    {
        return;
    }
    // Re-key on negated distance so queue_closest pops closest-first.
    std::priority_queue<std::pair<dist_t, tableint>> queue_closest;
    std::vector<std::pair<dist_t, tableint>> return_list;
    while (top_candidates.size() > 0)
    {
        queue_closest.emplace(-top_candidates.top().first, top_candidates.top().second);
        top_candidates.pop();
    }
    while (queue_closest.size())
    {
        if (return_list.size() >= M)
            break;
        std::pair<dist_t, tableint> curent_pair = queue_closest.top();
        dist_t dist_to_query = -curent_pair.first;
        queue_closest.pop();
        bool good = true;
        // Reject the candidate if any already-kept neighbor is closer to it
        // than the candidate is to the query (diversity criterion).
        for (std::pair<dist_t, tableint> second_pair : return_list)
        {
            dist_t curdist =
                fstdistfunc_(getDataByInternalId(mapping_id[second_pair.second]),
                             getDataByInternalId(mapping_id[curent_pair.second]),
                             dist_func_param_);
            if (curdist < dist_to_query)
            {
                good = false;
                break;
            }
        }
        if (good)
        {
            return_list.push_back(curent_pair);
        }
    }
    // Restore positive distances while refilling the caller's heap.
    for (std::pair<dist_t, tableint> curent_pair : return_list)
    {
        top_candidates.emplace(-curent_pair.first, curent_pair.second);
    }
}
// Multi-layer variant of getNeighborsByHeuristic2(): identical pruning
// logic, but vector data is read from the alternative level-0 block
// `data_layer0` instead of the member data_level0_memory_.
//
// Fix vs. the previous revision: a stray empty statement (`;`) after the
// distance computation was removed.
void multi_layer_getNeighborsByHeuristic2(
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
    const size_t M, char *data_layer0)
{
    // Nothing to prune if we already have fewer than M candidates.
    if (top_candidates.size() < M)
    {
        return;
    }
    // Re-key on negated distance so queue_closest pops closest-first.
    std::priority_queue<std::pair<dist_t, tableint>> queue_closest;
    std::vector<std::pair<dist_t, tableint>> return_list;
    while (top_candidates.size() > 0)
    {
        queue_closest.emplace(-top_candidates.top().first, top_candidates.top().second);
        top_candidates.pop();
    }
    while (queue_closest.size())
    {
        if (return_list.size() >= M)
            break;
        std::pair<dist_t, tableint> curent_pair = queue_closest.top();
        dist_t dist_to_query = -curent_pair.first;
        queue_closest.pop();
        bool good = true;
        // Reject the candidate if any already-kept neighbor is closer to it
        // than the candidate is to the query (diversity criterion).
        for (std::pair<dist_t, tableint> second_pair : return_list)
        {
            dist_t curdist =
                fstdistfunc_(getDataByInternalId(second_pair.second, data_layer0),
                             getDataByInternalId(curent_pair.second, data_layer0),
                             dist_func_param_);
            if (curdist < dist_to_query)
            {
                good = false;
                break;
            }
        }
        if (good)
        {
            return_list.push_back(curent_pair);
        }
    }
    // Restore positive distances while refilling the caller's heap.
    for (std::pair<dist_t, tableint> curent_pair : return_list)
    {
        top_candidates.emplace(-curent_pair.first, curent_pair.second);
    }
}
void dmd_hnsw_multi_layer_getNeighborsByHeuristic2(
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
const size_t M, char *data_layer0, std::vector<int> mapping_id)
{
if (top_candidates.size() < M)
{
return;
}
std::priority_queue<std::pair<dist_t, tableint>> queue_closest;
std::vector<std::pair<dist_t, tableint>> return_list;
while (top_candidates.size() > 0)
{
queue_closest.emplace(-top_candidates.top().first, top_candidates.top().second);
top_candidates.pop();
}
while (queue_closest.size())
{
if (return_list.size() >= M)
break;
std::pair<dist_t, tableint> curent_pair = queue_closest.top();
dist_t dist_to_query = -curent_pair.first;
queue_closest.pop();
bool good = true;
for (std::pair<dist_t, tableint> second_pair : return_list)
{
dist_t curdist =
fstdistfunc_(getDataByInternalId(mapping_id[second_pair.second], data_layer0),
getDataByInternalId(mapping_id[curent_pair.second], data_layer0),
dist_func_param_);
;
if (curdist < dist_to_query)
{
good = false;
break;
}
}
if (good)
{
return_list.push_back(curent_pair);
}
}
for (std::pair<dist_t, tableint> curent_pair : return_list)
{
top_candidates.emplace(-curent_pair.first, curent_pair.second);
}
}
// Address of the level-0 link list of `internal_id` in the default
// level-0 storage block (data_level0_memory_).
linklistsizeint *get_linklist0(tableint internal_id) const
{
    char *element_base = data_level0_memory_ + internal_id * size_data_per_element_;
    return (linklistsizeint *)(element_base + offsetLevel0_);
};
// Address of the level-0 link list of `internal_id` inside a
// caller-supplied level-0 storage block `data_level0`.
linklistsizeint *get_linklist0(tableint internal_id, char *data_level0) const
{
    char *element_base = data_level0 + internal_id * size_data_per_element_;
    return (linklistsizeint *)(element_base + offsetLevel0_);
};
// Address of the link list of `internal_id` on upper layer `level`
// (level >= 1; the per-element buffer stores layers 1..top contiguously).
linklistsizeint *get_linklist(tableint internal_id, int level) const
{
    char *upper_lists = linkLists_[internal_id];
    return (linklistsizeint *)(upper_lists + (level - 1) * size_links_per_element_);
};
// Link-list address of `internal_id` at any layer: dispatches to the
// level-0 accessor for level 0, and to the upper-layer accessor otherwise.
linklistsizeint *get_linklist_at_level(tableint internal_id, int level) const
{
    if (level == 0)
    {
        return get_linklist0(internal_id);
    }
    return get_linklist(internal_id, level);
};
// Link a newly inserted element `cur_c` into layer `level`:
//  1. prune top_candidates to at most maxM (Mcurmax) diverse neighbors,
//  2. write cur_c's outgoing links,
//  3. add the reverse link cur_c into each selected neighbor, re-running
//     the pruning heuristic on any neighbor whose list is already full.
// Returns the closest selected neighbor, used by the caller as the next
// entry point.
// NOTE(review): `data_point` is not referenced in this body — distances are
// computed from stored vectors via getDataByInternalId(cur_c); confirm the
// parameter is kept only for interface symmetry.
// NOTE(review): reverse links (step 3) are only updated when level == 0
// here; the dmd_hnsw_ variant below runs them on every level.
tableint mutuallyConnectNewElement(const void *data_point, tableint cur_c,
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
int level, bool isUpdate)
{
size_t Mcurmax = level ? maxM_ : maxM0_;
getNeighborsByHeuristic2(top_candidates, Mcurmax);
if (top_candidates.size() > Mcurmax)
throw std::runtime_error("Should be not be more than M_ candidates returned by the heuristic");
std::vector<tableint> selectedNeighbors;
selectedNeighbors.reserve(Mcurmax);
// Drain the heap; worst-first order, so selectedNeighbors[0] ends up
// being the element popped first (heap top = largest distance kept).
while (top_candidates.size() > 0)
{
selectedNeighbors.push_back(top_candidates.top().second);
top_candidates.pop();
}
tableint next_closest_entry_point = selectedNeighbors[0];
{
// Write cur_c's own outgoing link list for this level.
linklistsizeint *ll_cur;
if (level == 0)
ll_cur = get_linklist0(cur_c);
else
ll_cur = get_linklist(cur_c, level);
if (*ll_cur && !isUpdate)
{
throw std::runtime_error("The newly inserted element should have blank link list");
}
setListCount(ll_cur, selectedNeighbors.size());
tableint *data = (tableint *)(ll_cur + 1);
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
if (data[idx] && !isUpdate)
throw std::runtime_error("Possible memory corruption");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
data[idx] = selectedNeighbors[idx];
}
}
if (level == 0)
{
// Add the reverse link cur_c to each selected neighbor.
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
std::unique_lock<std::mutex> lock(link_list_locks_[selectedNeighbors[idx]]);
linklistsizeint *ll_other;
if (level == 0)
ll_other = get_linklist0(selectedNeighbors[idx]);
else
ll_other = get_linklist(selectedNeighbors[idx], level);
size_t sz_link_list_other = getListCount(ll_other);
if (sz_link_list_other > Mcurmax)
throw std::runtime_error("Bad value of sz_link_list_other");
if (selectedNeighbors[idx] == cur_c)
throw std::runtime_error("Trying to connect an element to itself");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
tableint *data = (tableint *)(ll_other + 1);
bool is_cur_c_present = false;
if (isUpdate)
{
for (size_t j = 0; j < sz_link_list_other; j++)
{
if (data[j] == cur_c)
{
is_cur_c_present = true;
break;
}
}
}
// If cur_c is already present in the neighboring connections of `selectedNeighbors[idx]` then no need to modify any connections or run the heuristics.
if (!is_cur_c_present)
{
if (sz_link_list_other < Mcurmax)
{
// Room left: simply append the reverse link.
data[sz_link_list_other] = cur_c;
setListCount(ll_other, sz_link_list_other + 1);
}
else
//if (sz_link_list_other >= Mcurmax)
{
//if (sz_link_list_other >= Mcurmax) {
// finding the "weakest" element to replace it with the new one
dist_t d_max = fstdistfunc_(getDataByInternalId(cur_c), getDataByInternalId(selectedNeighbors[idx]),
dist_func_param_);
// Heuristic:
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
candidates.emplace(d_max, cur_c);
for (size_t j = 0; j < sz_link_list_other; j++)
{
candidates.emplace(
fstdistfunc_(getDataByInternalId(data[j]), getDataByInternalId(selectedNeighbors[idx]),
dist_func_param_),
data[j]);
}
getNeighborsByHeuristic2(candidates, Mcurmax);
// Rewrite the neighbor's list with the pruned candidate set.
int indx = 0;
while (candidates.size() > 0)
//while (indx < Mcurmax && candidates.size() > 0)
{
data[indx] = candidates.top().second;
candidates.pop();
indx++;
}
setListCount(ll_other, indx);
// Nearest K:
//int indx = -1;
//for (int j = 0; j < sz_link_list_other; j++) {
//dist_t d = fstdistfunc_(getDataByInternalId(data[j]), getDataByInternalId(rez[idx]), dist_func_param_);
//if (d > d_max) {
//indx = j;
//d_max = d;
//}
//}
//if (indx >= 0) {
//data[indx] = cur_c;
//}
}
}
}
}
return next_closest_entry_point;
}
// DMD variant of mutuallyConnectNewElement(): links a newly inserted
// element `cur_c` into layer `level`, translating every graph id to its
// data slot through `mapping_id` for distance computations. Unlike the
// base version, reverse links are updated on EVERY level (the level == 0
// guard is intentionally disabled — kept below as comments).
// Returns the closest selected neighbor as the next entry point.
//
// Fixes vs. the previous revision: `mapping_id` is taken by const reference
// (no per-call copy of the whole table) and the Chinese comments were
// translated to English.
// NOTE(review): `data_point` is not referenced in this body — distances are
// computed from stored vectors; confirm it is kept for interface symmetry.
tableint dmd_hnsw_mutuallyConnectNewElement(const void *data_point, tableint cur_c,
                                            std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
                                            int level, bool isUpdate, const std::vector<int> &mapping_id)
{
    size_t Mcurmax = level ? maxM_ : maxM0_;
    dmd_hnsw_getNeighborsByHeuristic2(top_candidates, Mcurmax, mapping_id); // original code used M_
    if (top_candidates.size() > Mcurmax)                                    // original code used M_
        throw std::runtime_error("Should be not be more than M_ candidates returned by the heuristic");
    std::vector<tableint> selectedNeighbors;
    selectedNeighbors.reserve(Mcurmax);
    // Drain the heap; the element popped first (largest kept distance for a
    // max-heap) lands in selectedNeighbors[0].
    while (top_candidates.size() > 0)
    {
        selectedNeighbors.push_back(top_candidates.top().second);
        top_candidates.pop();
    }
    tableint next_closest_entry_point = selectedNeighbors[0];
    {
        // Write cur_c's own outgoing link list for this level.
        linklistsizeint *ll_cur;
        if (level == 0)
            ll_cur = get_linklist0(cur_c);
        else
            ll_cur = get_linklist(cur_c, level);
        if (*ll_cur && !isUpdate)
        {
            throw std::runtime_error("The newly inserted element should have blank link list");
        }
        setListCount(ll_cur, selectedNeighbors.size());
        tableint *data = (tableint *)(ll_cur + 1);
        for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
        {
            if (data[idx] && !isUpdate)
                throw std::runtime_error("Possible memory corruption");
            if (level > element_levels_[selectedNeighbors[idx]])
                throw std::runtime_error("Trying to make a link on a non-existent level");
            data[idx] = selectedNeighbors[idx];
        }
    }
    //if (level == 0)    // (disabled: reverse links are updated on every level)
    //{
    for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
    {
        std::unique_lock<std::mutex> lock(link_list_locks_[selectedNeighbors[idx]]);
        linklistsizeint *ll_other;
        if (level == 0)
            ll_other = get_linklist0(selectedNeighbors[idx]);
        else
            ll_other = get_linklist(selectedNeighbors[idx], level);
        size_t sz_link_list_other = getListCount(ll_other);
        if (sz_link_list_other > Mcurmax)
            throw std::runtime_error("Bad value of sz_link_list_other");
        if (selectedNeighbors[idx] == cur_c)
            throw std::runtime_error("Trying to connect an element to itself");
        if (level > element_levels_[selectedNeighbors[idx]])
            throw std::runtime_error("Trying to make a link on a non-existent level");
        tableint *data = (tableint *)(ll_other + 1);
        bool is_cur_c_present = false;
        if (isUpdate)
        {
            for (size_t j = 0; j < sz_link_list_other; j++)
            {
                if (data[j] == cur_c)
                {
                    is_cur_c_present = true;
                    break;
                }
            }
        }
        // If cur_c is already present in the neighboring connections of `selectedNeighbors[idx]` then no need to modify any connections or run the heuristics.
        if (!is_cur_c_present)
        {
            if (sz_link_list_other < Mcurmax)
            {
                // Room left: simply append the reverse link.
                data[sz_link_list_other] = cur_c;
                setListCount(ll_other, sz_link_list_other + 1);
            }
            else
            {
                // The neighbor's list is full: re-run the pruning heuristic
                // over its current links plus cur_c, keeping the best Mcurmax.
                dist_t d_max = fstdistfunc_(getDataByInternalId(mapping_id[cur_c]), getDataByInternalId(mapping_id[selectedNeighbors[idx]]),
                                            dist_func_param_);
                std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
                candidates.emplace(d_max, cur_c);
                for (size_t j = 0; j < sz_link_list_other; j++)
                {
                    candidates.emplace(
                        fstdistfunc_(getDataByInternalId(mapping_id[data[j]]), getDataByInternalId(mapping_id[selectedNeighbors[idx]]),
                                     dist_func_param_),
                        data[j]);
                }
                dmd_hnsw_getNeighborsByHeuristic2(candidates, Mcurmax, mapping_id);
                // Rewrite the neighbor's list with the pruned candidate set.
                int indx = 0;
                while (candidates.size() > 0)
                {
                    data[indx] = candidates.top().second;
                    candidates.pop();
                    indx++;
                }
                setListCount(ll_other, indx);
            }
        }
    }
    //}
    return next_closest_entry_point;
}
/* Connects a newly inserted element `cur_c` to the neighbours selected by the
 * heuristic, then back-links those neighbours to `cur_c`. This variant serves
 * the index configuration that keeps several parallel level-0 data blocks;
 * `data_layer0` selects which block is read/written for level-0 link lists.
 * Returns one of the selected neighbours to use as the entry point for the
 * next (lower) level.
 * NOTE(review): selectedNeighbors[0] is the FIRST element popped from a
 * max-heap, i.e. the farthest candidate, not the closest — upstream hnswlib
 * returns selectedNeighbors.back(); confirm this is intentional.
 * NOTE(review): unlike the plain variant, the back-link pass below only runs
 * when level == 0 — verify upper-level back-links are handled elsewhere. */
tableint multi_layer_mutuallyConnectNewElement(const void *data_point, tableint cur_c,
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
int level, bool isUpdate, char *data_layer0)
{
// Level 0 allows more links per element than the upper levels.
size_t Mcurmax = level ? maxM_ : maxM0_;
multi_layer_getNeighborsByHeuristic2(top_candidates, Mcurmax, data_layer0); // original code used M_
if (top_candidates.size() > Mcurmax) // original code used M_
throw std::runtime_error("Should be not be more than M_ candidates returned by the heuristic");
// Drain the candidate heap into a flat list (popped farthest-first).
std::vector<tableint> selectedNeighbors;
selectedNeighbors.reserve(Mcurmax);
while (top_candidates.size() > 0)
{
selectedNeighbors.push_back(top_candidates.top().second);
top_candidates.pop();
}
tableint next_closest_entry_point = selectedNeighbors[0];
// Phase 1: write cur_c's own (forward) link list. The caller is expected to
// hold cur_c's lock, so no locking is done here.
{
linklistsizeint *ll_cur;
if (level == 0)
ll_cur = get_linklist0(cur_c, data_layer0);
else
ll_cur = get_linklist(cur_c, level);
if (*ll_cur && !isUpdate)
{
throw std::runtime_error("The newly inserted element should have blank link list");
}
setListCount(ll_cur, selectedNeighbors.size());
tableint *data = (tableint *)(ll_cur + 1);
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
if (data[idx] && !isUpdate)
throw std::runtime_error("Possible memory corruption");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
data[idx] = selectedNeighbors[idx];
}
}
// Phase 2: add a back-link cur_c into each selected neighbour's list,
// re-running the pruning heuristic when the neighbour's list is full.
if (level == 0)
{
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
// Each neighbour's list is guarded by its own per-element mutex.
std::unique_lock<std::mutex> lock(link_list_locks_[selectedNeighbors[idx]]);
linklistsizeint *ll_other;
if (level == 0)
ll_other = get_linklist0(selectedNeighbors[idx], data_layer0);
else
ll_other = get_linklist(selectedNeighbors[idx], level);
size_t sz_link_list_other = getListCount(ll_other);
if (sz_link_list_other > Mcurmax)
throw std::runtime_error("Bad value of sz_link_list_other");
if (selectedNeighbors[idx] == cur_c)
throw std::runtime_error("Trying to connect an element to itself");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
tableint *data = (tableint *)(ll_other + 1);
// On updates, skip neighbours that already link back to cur_c.
bool is_cur_c_present = false;
if (isUpdate)
{
for (size_t j = 0; j < sz_link_list_other; j++)
{
if (data[j] == cur_c)
{
is_cur_c_present = true;
break;
}
}
}
// If cur_c is already present in the neighboring connections of `selectedNeighbors[idx]` then no need to modify any connections or run the heuristics.
if (!is_cur_c_present)
{
if (sz_link_list_other < Mcurmax)
{
// Free slot available: append cur_c directly.
data[sz_link_list_other] = cur_c;
setListCount(ll_other, sz_link_list_other + 1);
}
else
//if (sz_link_list_other >= Mcurmax)
{
//if (sz_link_list_other >= Mcurmax) {
// finding the "weakest" element to replace it with the new one
dist_t d_max = fstdistfunc_(getDataByInternalId(cur_c, data_layer0), getDataByInternalId(selectedNeighbors[idx], data_layer0),
dist_func_param_);
// Heuristic:
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
candidates.emplace(d_max, cur_c);
for (size_t j = 0; j < sz_link_list_other; j++)
{
candidates.emplace(
fstdistfunc_(getDataByInternalId(data[j], data_layer0), getDataByInternalId(selectedNeighbors[idx], data_layer0),
dist_func_param_),
data[j]);
}
multi_layer_getNeighborsByHeuristic2(candidates, Mcurmax, data_layer0);
// Rewrite the neighbour's list with the pruned candidate set.
int indx = 0;
while (candidates.size() > 0)
//while (indx < Mcurmax && candidates.size() > 0)
{
data[indx] = candidates.top().second;
candidates.pop();
indx++;
}
setListCount(ll_other, indx);
// Nearest K:
//int indx = -1;
//for (int j = 0; j < sz_link_list_other; j++) {
//dist_t d = fstdistfunc_(getDataByInternalId(data[j]), getDataByInternalId(rez[idx]), dist_func_param_);
//if (d > d_max) {
//indx = j;
//d_max = d;
//}
//}
//if (indx >= 0) {
//data[indx] = cur_c;
//}
}
}
}
}
return next_closest_entry_point;
}
/* Variant of multi_layer_mutuallyConnectNewElement for the "dmd_hnsw" path:
 * `mapping_id` translates the internal ids used by the graph into the ids of
 * the level-0 storage block `data_layer0`, both for raw-vector access and for
 * level-0 link-list lookup. Upper-level link lists still use the raw ids.
 * Returns a selected neighbour to use as the entry point for the next level.
 * NOTE(review): `mapping_id` is passed by value — each call copies the whole
 * vector; consider `const std::vector<int> &` if callers permit.
 * NOTE(review): the `level == 0` guard on the back-link pass is commented out
 * here (unlike the multi_layer variant) — confirm that is intentional. */
tableint dmd_hnsw_multi_layer_mutuallyConnectNewElement(const void *data_point, tableint cur_c,
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
int level, bool isUpdate, char *data_layer0, std::vector<int> mapping_id)
{
// Level 0 allows more links per element than the upper levels.
size_t Mcurmax = level ? maxM_ : maxM0_;
dmd_hnsw_multi_layer_getNeighborsByHeuristic2(top_candidates, Mcurmax, data_layer0, mapping_id); // original code used M_
if (top_candidates.size() > Mcurmax) // original code used M_
throw std::runtime_error("Should be not be more than M_ candidates returned by the heuristic");
// Drain the candidate heap into a flat list (popped farthest-first).
std::vector<tableint> selectedNeighbors;
selectedNeighbors.reserve(Mcurmax);
while (top_candidates.size() > 0)
{
selectedNeighbors.push_back(top_candidates.top().second);
top_candidates.pop();
}
tableint next_closest_entry_point = selectedNeighbors[0];
// Phase 1: write cur_c's own (forward) link list. Level 0 goes through the
// id mapping; upper levels use the raw internal id.
{
linklistsizeint *ll_cur;
if (level == 0)
ll_cur = get_linklist0(mapping_id[cur_c], data_layer0);
else
ll_cur = get_linklist(cur_c, level);
if (*ll_cur && !isUpdate)
{
throw std::runtime_error("The newly inserted element should have blank link list");
}
setListCount(ll_cur, selectedNeighbors.size());
tableint *data = (tableint *)(ll_cur + 1);
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
if (data[idx] && !isUpdate)
throw std::runtime_error("Possible memory corruption");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
data[idx] = selectedNeighbors[idx];
}
}
// Phase 2: back-link cur_c into each selected neighbour's list, re-running
// the pruning heuristic when the neighbour's list is already full.
//if (level == 0)
//{
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
// Each neighbour's list is guarded by its own per-element mutex.
std::unique_lock<std::mutex> lock(link_list_locks_[selectedNeighbors[idx]]);
linklistsizeint *ll_other;
if (level == 0)
ll_other = get_linklist0(mapping_id[selectedNeighbors[idx]], data_layer0);
else
ll_other = get_linklist(selectedNeighbors[idx], level);
size_t sz_link_list_other = getListCount(ll_other);
if (sz_link_list_other > Mcurmax)
throw std::runtime_error("Bad value of sz_link_list_other");
if (selectedNeighbors[idx] == cur_c)
throw std::runtime_error("Trying to connect an element to itself");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
tableint *data = (tableint *)(ll_other + 1);
// On updates, skip neighbours that already link back to cur_c.
bool is_cur_c_present = false;
if (isUpdate)
{
for (size_t j = 0; j < sz_link_list_other; j++)
{
if (data[j] == cur_c)
{
is_cur_c_present = true;
break;
}
}
}
// If cur_c is already present in the neighboring connections of `selectedNeighbors[idx]` then no need to modify any connections or run the heuristics.
if (!is_cur_c_present)
{
if (sz_link_list_other < Mcurmax)
{
// Free slot available: append cur_c directly.
data[sz_link_list_other] = cur_c;
setListCount(ll_other, sz_link_list_other + 1);
}
else
//if (sz_link_list_other >= Mcurmax)
{
//if (sz_link_list_other >= Mcurmax) {
// finding the "weakest" element to replace it with the new one
dist_t d_max = fstdistfunc_(getDataByInternalId(mapping_id[cur_c], data_layer0), getDataByInternalId(mapping_id[selectedNeighbors[idx]], data_layer0),
dist_func_param_);
// Heuristic:
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
candidates.emplace(d_max, cur_c);
for (size_t j = 0; j < sz_link_list_other; j++)
{
candidates.emplace(
fstdistfunc_(getDataByInternalId(mapping_id[data[j]], data_layer0), getDataByInternalId(mapping_id[selectedNeighbors[idx]], data_layer0),
dist_func_param_),
data[j]);
}
dmd_hnsw_multi_layer_getNeighborsByHeuristic2(candidates, Mcurmax, data_layer0, mapping_id);
// Rewrite the neighbour's list with the pruned candidate set.
int indx = 0;
while (candidates.size() > 0)
//while (indx < Mcurmax && candidates.size() > 0)
{
data[indx] = candidates.top().second;
candidates.pop();
indx++;
}
setListCount(ll_other, indx);
// Nearest K:
//int indx = -1;
//for (int j = 0; j < sz_link_list_other; j++) {
//dist_t d = fstdistfunc_(getDataByInternalId(data[j]), getDataByInternalId(rez[idx]), dist_func_param_);
//if (d > d_max) {
//indx = j;
//d_max = d;
//}
//}
//if (indx >= 0) {
//data[indx] = cur_c;
//}
}
}
}
//}
return next_closest_entry_point;
}
/* Batch-insertion variant: writes only cur_c's OWN (forward) link list from
 * the heuristic-selected neighbours. Unlike mutuallyConnectNewElement, no
 * back-links are added here — presumably deferred to a later pass (see
 * neighbors_connect below); confirm against the batch insertion path.
 * Returns a selected neighbour to use as the next level's entry point.
 * NOTE(review): selectedNeighbors[0] is the farthest popped candidate, not
 * the closest — see the note on multi_layer_mutuallyConnectNewElement. */
tableint batch_mutuallyConnectNewElement(const void *data_point, tableint cur_c,
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> &top_candidates,
int level, bool isUpdate)
{
// Level 0 allows more links per element than the upper levels.
size_t Mcurmax = level ? maxM_ : maxM0_;
getNeighborsByHeuristic2(top_candidates, Mcurmax);
if (top_candidates.size() > Mcurmax)
throw std::runtime_error("Should be not be more than M_ candidates returned by the heuristic");
// Drain the candidate heap into a flat list (popped farthest-first).
std::vector<tableint> selectedNeighbors;
//std::priority_queue<tableint> Neighbors_queue;
selectedNeighbors.reserve(Mcurmax);
while (top_candidates.size() > 0)
{
selectedNeighbors.push_back(top_candidates.top().second);
//Neighbors_queue.emplace(-top_candidates.top().second);
top_candidates.pop();
}
/*
while (Neighbors_queue.size() > 0)
{
selectedNeighbors.push_back(-Neighbors_queue.top());
Neighbors_queue.pop();
}
*/
tableint next_closest_entry_point = selectedNeighbors[0];
// Write cur_c's forward link list; caller is expected to hold cur_c's lock.
{
linklistsizeint *ll_cur;
if (level == 0)
ll_cur = get_linklist0(cur_c);
else
ll_cur = get_linklist(cur_c, level);
if (*ll_cur && !isUpdate)
{
throw std::runtime_error("The newly inserted element should have blank link list");
}
setListCount(ll_cur, selectedNeighbors.size());
tableint *data = (tableint *)(ll_cur + 1);
for (size_t idx = 0; idx < selectedNeighbors.size(); idx++)
{
if (data[idx] && !isUpdate)
throw std::runtime_error("Possible memory corruption");
if (level > element_levels_[selectedNeighbors[idx]])
throw std::runtime_error("Trying to make a link on a non-existent level");
data[idx] = selectedNeighbors[idx];
}
}
return next_closest_entry_point;
}
/* Inserts a back-link from `selected_N` to `cur_c` on level 0 by merging
 * cur_c into selected_N's existing neighbour list and re-running the pruning
 * heuristic over the union (bounded by maxM0_). Complements the forward-only
 * linking done by batch_mutuallyConnectNewElement.
 * NOTE(review): no lock is taken on selected_N's link list here, unlike the
 * mutuallyConnect* variants — confirm the caller serializes access. */
void neighbors_connect(tableint cur_c, tableint selected_N)
{
linklistsizeint *ll_other;
ll_other = get_linklist0(selected_N);
tableint *data = (tableint *)(ll_other + 1);
size_t sz_link_list_other = getListCount(ll_other);
// Distance of the new candidate to the list owner.
dist_t d_max = fstdistfunc_(getDataByInternalId(cur_c), getDataByInternalId(selected_N),
dist_func_param_);
// Heuristic:
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
candidates.emplace(d_max, cur_c);
// Seed the heap with all current neighbours plus cur_c, then prune.
for (size_t j = 0; j < sz_link_list_other; j++)
{
candidates.emplace(
fstdistfunc_(getDataByInternalId(data[j]), getDataByInternalId(selected_N),
dist_func_param_),
data[j]);
}
getNeighborsByHeuristic2(candidates, maxM0_);
// Rewrite selected_N's list with the pruned candidate set.
int indx = 0;
while (candidates.size() > 0)
//while (indx < Mcurmax && candidates.size() > 0)
{
data[indx] = candidates.top().second;
candidates.pop();
indx++;
}
setListCount(ll_other, indx);
}
// Guards updates of the entry point / maximum level during insertion.
std::mutex global;
// Query-time size of the dynamic candidate list (the HNSW "ef" parameter).
size_t ef_;
// Sets the query-time ef parameter (larger => more accurate, slower search).
void setEf(size_t ef)
{
ef_ = ef;
}
/* Searches for the k nearest neighbours of `query_data`.
 * Descends greedily from the top level to level 1, then runs a best-first
 * search over the base layer with candidate-list size ef_, and finally trims
 * the result to k elements. Returns a max-heap of (distance, internal id)
 * pairs (top = farthest), possibly empty if the index is empty.
 * Fixes versus the previous revision:
 *  - `cand < 0 || cand > max_elements_` — the first test was dead (tableint
 *    is unsigned) and the second was off by one: valid internal ids are
 *    strictly less than max_elements_. Replaced by `cand >= max_elements_`.
 *  - `top_candidates.size() > k` compared size_t against int; k is now cast
 *    explicitly. */
std::priority_queue<std::pair<dist_t, tableint>> searchKnnInternal(void *query_data, int k)
{
std::priority_queue<std::pair<dist_t, tableint>> top_candidates;
if (cur_element_count == 0)
return top_candidates;
tableint currObj = enterpoint_node_;
dist_t curdist = fstdistfunc_(query_data, getDataByInternalId(enterpoint_node_), dist_func_param_);
// Greedy descent: on every upper level, hop to the closest neighbour of the
// current node until no neighbour improves the distance.
for (size_t level = maxlevel_; level > 0; level--)
{
bool changed = true;
while (changed)
{
changed = false;
int *data;
data = (int *)get_linklist(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
// tableint is unsigned; valid internal ids are < max_elements_.
if (cand >= max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
// Base-layer best-first search; the template flag selects whether elements
// marked deleted must be filtered out of the result.
if (has_deletions_)
{
std::priority_queue<std::pair<dist_t, tableint>> top_candidates1 = searchBaseLayerST<true>(currObj, query_data,
ef_);
top_candidates.swap(top_candidates1);
}
else
{
std::priority_queue<std::pair<dist_t, tableint>> top_candidates1 = searchBaseLayerST<false>(currObj, query_data,
ef_);
top_candidates.swap(top_candidates1);
}
// The heap top is the worst candidate: pop until only the k best remain.
while (top_candidates.size() > static_cast<size_t>(k))
{
top_candidates.pop();
}
return top_candidates;
};
/* Grows the index capacity to `new_max_elements`, preserving the currently
 * stored elements. Throws if the new capacity is below the current element
 * count or if an allocation fails.
 * NOTE(review): not safe to call concurrently with insert/search — it frees
 * and replaces the base-layer buffer and the visited-list pool. */
void resizeIndex(size_t new_max_elements)
{
if (new_max_elements < cur_element_count)
throw std::runtime_error("Cannot resize, max element is less than the current number of elements");
// The visited-list pool is sized by the element count, so rebuild it.
delete visited_list_pool_;
visited_list_pool_ = new VisitedListPool(1, new_max_elements);
element_levels_.resize(new_max_elements);
// std::mutex is neither movable nor copyable; swap in a new lock vector.
std::vector<std::mutex>(new_max_elements).swap(link_list_locks_);
// Reallocate base layer
char *data_level0_memory_new = (char *)malloc(new_max_elements * size_data_per_element_);
if (data_level0_memory_new == nullptr)
throw std::runtime_error("Not enough memory: resizeIndex failed to allocate base layer");
// Only cur_element_count slots hold data; the rest stay uninitialized.
memcpy(data_level0_memory_new, data_level0_memory_, cur_element_count * size_data_per_element_);
free(data_level0_memory_);
data_level0_memory_ = data_level0_memory_new;
// Reallocate all other layers
char **linkLists_new = (char **)malloc(sizeof(void *) * new_max_elements);
if (linkLists_new == nullptr)
throw std::runtime_error("Not enough memory: resizeIndex failed to allocate other layers");
memcpy(linkLists_new, linkLists_, cur_element_count * sizeof(void *));
free(linkLists_);
linkLists_ = linkLists_new;
max_elements_ = new_max_elements;
}
/* Serializes the index to the file at `location` in the binary layout that
 * loadIndex() reads back: a fixed header of POD fields, the raw base-layer
 * block for the first cur_element_count elements, then one (size, payload)
 * pair per element for the upper-level link lists (size 0 when the element
 * only exists on level 0).
 * Fix: previously an unopenable path (bad directory, no permission) was
 * silently ignored and produced no file; now it throws, matching the
 * behavior of loadIndex(). */
void saveIndex(const std::string &location)
{
std::ofstream output(location, std::ios::binary);
if (!output.is_open())
throw std::runtime_error("Cannot open file");
std::streampos position;
// Fixed-size header — field order must match loadIndex() exactly.
writeBinaryPOD(output, offsetLevel0_);
writeBinaryPOD(output, max_elements_);
writeBinaryPOD(output, cur_element_count);
writeBinaryPOD(output, size_data_per_element_);
writeBinaryPOD(output, label_offset_);
writeBinaryPOD(output, offsetData_);
writeBinaryPOD(output, maxlevel_);
writeBinaryPOD(output, enterpoint_node_);
writeBinaryPOD(output, maxM_);
writeBinaryPOD(output, maxM0_);
writeBinaryPOD(output, M_);
writeBinaryPOD(output, mult_);
writeBinaryPOD(output, ef_construction_);
// Raw base layer: vectors, labels and level-0 link lists in one block.
output.write(data_level0_memory_, cur_element_count * size_data_per_element_);
// Per-element upper-level link lists, each prefixed with its byte size.
for (size_t i = 0; i < cur_element_count; i++)
{
unsigned int linkListSize = element_levels_[i] > 0 ? size_links_per_element_ * element_levels_[i] : 0;
writeBinaryPOD(output, linkListSize);
if (linkListSize)
output.write(linkLists_[i], linkListSize);
}
output.close();
}
/* Loads an index previously written by saveIndex().
 * `s` supplies the distance function; `max_elements_i` optionally raises the
 * capacity above what was stored (it is ignored when smaller than the stored
 * element count). Performs a structural sanity pass over the file before
 * allocating, and throws on a corrupted or truncated file. */
void loadIndex(const std::string &location, SpaceInterface<dist_t> *s, size_t max_elements_i = 0)
{
std::ifstream input(location, std::ios::binary);
if (!input.is_open())
throw std::runtime_error("Cannot open file");
// get file size:
input.seekg(0, input.end);
std::streampos total_filesize = input.tellg();
input.seekg(0, input.beg);
// Fixed-size header — field order must match saveIndex() exactly.
readBinaryPOD(input, offsetLevel0_);
readBinaryPOD(input, max_elements_);
readBinaryPOD(input, cur_element_count);
// Allow the caller to request a larger capacity than was serialized.
size_t max_elements = max_elements_i;
if (max_elements < cur_element_count)
max_elements = max_elements_;
max_elements_ = max_elements;
readBinaryPOD(input, size_data_per_element_);
readBinaryPOD(input, label_offset_);
readBinaryPOD(input, offsetData_);
readBinaryPOD(input, maxlevel_);
readBinaryPOD(input, enterpoint_node_);
readBinaryPOD(input, maxM_);
readBinaryPOD(input, maxM0_);
readBinaryPOD(input, M_);
readBinaryPOD(input, mult_);
readBinaryPOD(input, ef_construction_);
data_size_ = s->get_data_size();
fstdistfunc_ = s->get_dist_func();
dist_func_param_ = s->get_dist_func_param();
// Remember where the payload starts so we can rewind after the check pass.
auto pos = input.tellg();
/// Optional - check if index is ok:
input.seekg(cur_element_count * size_data_per_element_, input.cur);
for (size_t i = 0; i < cur_element_count; i++)
{
if (input.tellg() < 0 || input.tellg() >= total_filesize)
{
throw std::runtime_error("Index seems to be corrupted or unsupported");
}
unsigned int linkListSize;
readBinaryPOD(input, linkListSize);
if (linkListSize != 0)
{
input.seekg(linkListSize, input.cur);
}
}
// throw exception if it either corrupted or old index
if (input.tellg() != total_filesize)
throw std::runtime_error("Index seems to be corrupted or unsupported");
input.clear();
/// Optional check end
input.seekg(pos, input.beg);
// Base layer is sized for the (possibly enlarged) capacity, but only the
// serialized cur_element_count slots are filled from the file.
data_level0_memory_ = (char *)malloc(max_elements * size_data_per_element_);
if (data_level0_memory_ == nullptr)
throw std::runtime_error("Not enough memory: loadIndex failed to allocate level0");
input.read(data_level0_memory_, cur_element_count * size_data_per_element_);
size_links_per_element_ = maxM_ * sizeof(tableint) + sizeof(linklistsizeint);
size_links_level0_ = maxM0_ * sizeof(tableint) + sizeof(linklistsizeint);
// Rebuild runtime-only structures (locks, visited lists, level table).
std::vector<std::mutex>(max_elements).swap(link_list_locks_);
std::vector<std::mutex>(max_update_element_locks).swap(link_list_update_locks_);
visited_list_pool_ = new VisitedListPool(1, max_elements);
linkLists_ = (char **)malloc(sizeof(void *) * max_elements);
if (linkLists_ == nullptr)
throw std::runtime_error("Not enough memory: loadIndex failed to allocate linklists");
element_levels_ = std::vector<int>(max_elements);
revSize_ = 1.0 / mult_;
ef_ = 10;
// Read the per-element upper-level link lists and rebuild label lookup.
for (size_t i = 0; i < cur_element_count; i++)
{
label_lookup_[getExternalLabel(i)] = i;
unsigned int linkListSize;
readBinaryPOD(input, linkListSize);
if (linkListSize == 0)
{
element_levels_[i] = 0;
linkLists_[i] = nullptr;
}
else
{
// The element's top level is implied by the byte size of its lists.
element_levels_[i] = linkListSize / size_links_per_element_;
linkLists_[i] = (char *)malloc(linkListSize);
if (linkLists_[i] == nullptr)
throw std::runtime_error("Not enough memory: loadIndex failed to allocate linklist");
input.read(linkLists_[i], linkListSize);
}
}
// Delete marks live inside the serialized link-list headers; scan them to
// restore the has_deletions_ flag.
has_deletions_ = false;
for (size_t i = 0; i < cur_element_count; i++)
{
if (isMarkedDeleted(i))
has_deletions_ = true;
}
input.close();
return;
}
/* Returns a copy of the stored vector for the element with external label
 * `label`, reinterpreted as `dim` values of type data_t.
 * @throws std::runtime_error if the label is unknown or marked deleted.
 * Fixes versus the previous revision: the copy loop compared a signed int
 * against a size_t dimension and grew the vector one push_back at a time;
 * the vector is now range-constructed in one step. */
template <typename data_t>
std::vector<data_t> getDataByLabel(labeltype label)
{
// Resolve external label -> internal id; deleted elements are invisible.
auto search = label_lookup_.find(label);
if (search == label_lookup_.end() || isMarkedDeleted(search->second))
{
throw std::runtime_error("Label not found");
}
tableint label_c = search->second;
char *data_ptrv = getDataByInternalId(label_c);
// dist_func_param_ holds the dimensionality for the bundled metric spaces —
// TODO confirm for custom SpaceInterface implementations.
size_t dim = *((size_t *)dist_func_param_);
const data_t *data_ptr = (const data_t *)data_ptrv;
// Single allocation + copy of the raw vector data.
return std::vector<data_t>(data_ptr, data_ptr + dim);
}
// Low bit of the flag byte in an element's level-0 link-list header; set
// when the element is logically deleted.
static const unsigned char DELETE_MARK = 0x01;
// static const unsigned char REUSE_MARK = 0x10;
/**
 * Marks the element with the given label as deleted. The graph structure is
 * NOT modified; searches merely filter the element out of results.
 * @param label external label of the element to delete
 * @throws std::runtime_error if no element carries that label
 */
void markDelete(labeltype label)
{
has_deletions_ = true;
auto search = label_lookup_.find(label);
if (search == label_lookup_.end())
{
throw std::runtime_error("Label not found");
}
markDeletedInternal(search->second);
}
/**
 * Sets the delete mark for an element. The level-0 link-list header is a
 * 4-byte word whose low 16 bits hold the neighbour count (see getListCount /
 * setListCount); the byte at offset 2 is used for flags — which is why
 * maxM0_ must stay small enough to fit in the count bytes.
 * @param internalId internal id of the element to mark
 */
void markDeletedInternal(tableint internalId)
{
unsigned char *ll_cur = ((unsigned char *)get_linklist0(internalId)) + 2;
*ll_cur |= DELETE_MARK;
}
/**
 * Clears the delete mark of an element (inverse of markDeletedInternal);
 * other flag bits in the header byte are left untouched.
 * @param internalId internal id of the element to unmark
 */
void unmarkDeletedInternal(tableint internalId)
{
unsigned char *ll_cur = ((unsigned char *)get_linklist0(internalId)) + 2;
*ll_cur &= ~DELETE_MARK;
}
/**
 * Reports whether the element carries the delete mark.
 * Reads the flag byte (offset 2) of the element's level-0 link-list header
 * and tests the DELETE_MARK bit.
 * @param internalId internal id of the element to query
 * @return true when the element has been marked deleted
 */
bool isMarkedDeleted(tableint internalId) const
{
const unsigned char *flag_byte = reinterpret_cast<unsigned char *>(get_linklist0(internalId)) + 2;
return (*flag_byte & DELETE_MARK) != 0;
}
/// Overload of isMarkedDeleted that reads the link-list header from an
/// explicitly supplied level-0 data block instead of the default one.
bool isMarkedDeleted(tableint internalId, char *data_level0) const
{
const unsigned char *flag_byte = reinterpret_cast<unsigned char *>(get_linklist0(internalId, data_level0)) + 2;
return (*flag_byte & DELETE_MARK) != 0;
}
/// Reads the neighbour count stored in the low 16 bits of a link-list
/// header (the upper bytes carry flags such as DELETE_MARK).
unsigned short int getListCount(linklistsizeint *ptr) const
{
return *reinterpret_cast<unsigned short int *>(ptr);
}
/// Writes the neighbour count into the low 16 bits of a link-list header,
/// leaving the flag bytes above the count untouched.
void setListCount(linklistsizeint *ptr, unsigned short int size) const
{
*reinterpret_cast<unsigned short int *>(ptr) = size;
}
/**
 * Inserts an element (or updates it in place if the label already exists).
 * Delegates to the three-argument overload with level == -1, which lets the
 * insertion draw a random level from the level distribution.
 * @param data_point raw vector data (data_size_ bytes are copied)
 * @param label external label to associate with the element
 */
void addPoint(const void *data_point, labeltype label)
{
addPoint(data_point, label, -1);
}
/* Replaces the stored vector of an existing element and repairs the graph
 * around it. For each level the element exists on, a fraction
 * (updateNeighborProbability) of its one-hop neighbours have their own
 * neighbour lists rebuilt from the surrounding two-hop candidate set, after
 * which repairConnectionsForUpdate re-links the element itself.
 * NOTE(review): caller is expected to hold the element's update lock (see
 * addPoint), since this rewrites link lists under per-element locks only. */
void updatePoint(const void *dataPoint, tableint internalId, float updateNeighborProbability)
{
// update the feature vector associated with existing point with new vector
memcpy(getDataByInternalId(internalId), dataPoint, data_size_);
int maxLevelCopy = maxlevel_;
tableint entryPointCopy = enterpoint_node_;
// If point to be updated is entry point and graph just contains single element then just return.
if (entryPointCopy == internalId && cur_element_count == 1)
return;
int elemLevel = element_levels_[internalId];
std::uniform_real_distribution<float> distribution(0.0, 1.0);
for (int layer = 0; layer <= elemLevel; layer++)
{
// sCand: candidate pool (element + 1-hop + 2-hop neighbours);
// sNeigh: the sampled 1-hop neighbours whose lists will be rebuilt.
std::unordered_set<tableint> sCand;
std::unordered_set<tableint> sNeigh;
std::vector<tableint> listOneHop = getConnectionsWithLock(internalId, layer);
if (listOneHop.size() == 0)
continue;
sCand.insert(internalId);
for (auto &&elOneHop : listOneHop)
{
sCand.insert(elOneHop);
// Only repair a random subset of neighbours, controlled by the caller.
if (distribution(update_probability_generator_) > updateNeighborProbability)
continue;
sNeigh.insert(elOneHop);
std::vector<tableint> listTwoHop = getConnectionsWithLock(elOneHop, layer);
for (auto &&elTwoHop : listTwoHop)
{
sCand.insert(elTwoHop);
}
}
for (auto &&neigh : sNeigh)
{
// if (neigh == internalId)
// continue;
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> candidates;
// `neigh` itself never becomes its own candidate, hence size - 1.
int size = sCand.find(neigh) == sCand.end() ? sCand.size() : sCand.size() - 1;
int elementsToKeep = std::min(int(ef_construction_), size);
for (auto &&cand : sCand)
{
if (cand == neigh)
continue;
dist_t distance = fstdistfunc_(getDataByInternalId(neigh), getDataByInternalId(cand), dist_func_param_);
// Keep only the elementsToKeep closest candidates (heap top = worst).
if (candidates.size() < elementsToKeep)
{
candidates.emplace(distance, cand);
}
else
{
if (distance < candidates.top().first)
{
candidates.pop();
candidates.emplace(distance, cand);
}
}
}
// Retrieve neighbours using heuristic and set connections.
getNeighborsByHeuristic2(candidates, layer == 0 ? maxM0_ : maxM_);
{
std::unique_lock<std::mutex> lock(link_list_locks_[neigh]);
linklistsizeint *ll_cur;
ll_cur = get_linklist_at_level(neigh, layer);
int candSize = candidates.size();
setListCount(ll_cur, candSize);
tableint *data = (tableint *)(ll_cur + 1);
for (size_t idx = 0; idx < candSize; idx++)
{
data[idx] = candidates.top().second;
candidates.pop();
}
}
}
}
// Finally, re-link the updated element itself into the graph.
repairConnectionsForUpdate(dataPoint, entryPointCopy, internalId, elemLevel, maxLevelCopy);
};
/* Re-links an updated element into the graph: greedily descends from the
 * entry point to the element's own top level, then on every level the
 * element exists on, searches the layer and reconnects the element to the
 * best candidates (excluding itself, to avoid self loops).
 * @param dataPoint             new raw vector of the element
 * @param entryPointInternalId  snapshot of the graph entry point
 * @param dataPointInternalId   internal id of the element being repaired
 * @param dataPointLevel        the element's own top level
 * @param maxLevel              snapshot of the graph's maximum level */
void repairConnectionsForUpdate(const void *dataPoint, tableint entryPointInternalId, tableint dataPointInternalId, int dataPointLevel, int maxLevel)
{
tableint currObj = entryPointInternalId;
// Greedy descent through the levels above the element's own top level.
if (dataPointLevel < maxLevel)
{
dist_t curdist = fstdistfunc_(dataPoint, getDataByInternalId(currObj), dist_func_param_);
for (int level = maxLevel; level > dataPointLevel; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
std::unique_lock<std::mutex> lock(link_list_locks_[currObj]);
data = get_linklist_at_level(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
#ifdef USE_SSE
_mm_prefetch(getDataByInternalId(*datal), _MM_HINT_T0);
#endif
for (int i = 0; i < size; i++)
{
#ifdef USE_SSE
_mm_prefetch(getDataByInternalId(*(datal + i + 1)), _MM_HINT_T0);
#endif
tableint cand = datal[i];
dist_t d = fstdistfunc_(dataPoint, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
}
if (dataPointLevel > maxLevel)
throw std::runtime_error("Level of item to be updated cannot be bigger than max level");
// Re-link the element on each of its own levels, top-down.
for (int level = dataPointLevel; level >= 0; level--)
{
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> topCandidates = searchBaseLayer(
currObj, dataPoint, level);
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> filteredTopCandidates;
while (topCandidates.size() > 0)
{
if (topCandidates.top().second != dataPointInternalId)
filteredTopCandidates.push(topCandidates.top());
topCandidates.pop();
}
// Since element_levels_ is being used to get `dataPointLevel`, there could be cases where `topCandidates` could just contains entry point itself.
// To prevent self loops, the `topCandidates` is filtered and thus can be empty.
if (filteredTopCandidates.size() > 0)
{
// A deleted entry point is invisible to searchBaseLayer; add it back so
// connectivity through it is preserved.
bool epDeleted = isMarkedDeleted(entryPointInternalId);
if (epDeleted)
{
filteredTopCandidates.emplace(fstdistfunc_(dataPoint, getDataByInternalId(entryPointInternalId), dist_func_param_), entryPointInternalId);
if (filteredTopCandidates.size() > ef_construction_)
filteredTopCandidates.pop();
}
currObj = mutuallyConnectNewElement(dataPoint, dataPointInternalId, filteredTopCandidates, level, true);
}
}
}
/// Returns a snapshot copy of an element's neighbour list at `level`,
/// taken while holding the element's link-list lock.
std::vector<tableint> getConnectionsWithLock(tableint internalId, int level)
{
std::unique_lock<std::mutex> guard(link_list_locks_[internalId]);
unsigned int *header = get_linklist_at_level(internalId, level);
int link_count = getListCount(header);
const tableint *links = (tableint *)(header + 1);
// Range-construct the copy; equivalent to the old memcpy into a
// presized vector.
return std::vector<tableint>(links, links + link_count);
};
/* Inserts an element into the index (or updates it in place when the label
 * already exists). `level >= 0` forces the element's top level; otherwise a
 * random level is drawn. Returns the element's internal id.
 * Thread-safety: takes the element-count guard, a striped update lock, the
 * element's own link-list lock, and (while the new element raises the max
 * level) the global entry-point lock. */
tableint addPoint(const void *data_point, labeltype label, int level)
{
tableint cur_c = 0;
{
// Checking if the element with the same label already exists
// if so, updating it *instead* of creating a new element.
std::unique_lock<std::mutex> templock_curr(cur_element_count_guard_);
auto search = label_lookup_.find(label);
if (search != label_lookup_.end())
{
tableint existingInternalId = search->second;
templock_curr.unlock();
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(existingInternalId & (max_update_element_locks - 1))]);
updatePoint(data_point, existingInternalId, 1.0);
return existingInternalId;
}
if (cur_element_count >= max_elements_)
{
throw std::runtime_error("The number of elements exceeds the specified limit");
};
// Internal ids are assigned densely in insertion order.
cur_c = cur_element_count;
cur_element_count++;
label_lookup_[label] = cur_c;
}
// Take update lock to prevent race conditions on an element with insertion/update at the same time.
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(cur_c & (max_update_element_locks - 1))]);
std::unique_lock<std::mutex> lock_el(link_list_locks_[cur_c]);
int curlevel = getRandomLevel(mult_);
if (level > 0) // level == -1 by default, so this branch is normally skipped
curlevel = level;
element_levels_[cur_c] = curlevel;
// Hold the global lock only while this insert may change the entry point.
std::unique_lock<std::mutex> templock(global);
int maxlevelcopy = maxlevel_;
if (curlevel <= maxlevelcopy)
templock.unlock();
tableint currObj = enterpoint_node_;
tableint enterpoint_copy = enterpoint_node_;
// Zero the element's slot in the primary and all parallel level-0 blocks.
memset(data_level0_memory_ + cur_c * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
for (int i = 0; i < num_layer; i++)
{
memset(data_level0_memory_multi_layer[i] + cur_c * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
}
// Initialisation of the data and label
memcpy(getExternalLabeLp(cur_c), &label, sizeof(labeltype));
memcpy(getDataByInternalId(cur_c), data_point, data_size_);
if (curlevel)
{
// Allocate zeroed upper-level link lists for the element.
linkLists_[cur_c] = (char *)malloc(size_links_per_element_ * curlevel + 1);
if (linkLists_[cur_c] == nullptr)
throw std::runtime_error("Not enough memory: addPoint failed to allocate linklist");
memset(linkLists_[cur_c], 0, size_links_per_element_ * curlevel + 1);
}
// currObj == -1 (wrapped) only before the very first insertion.
if ((signed)currObj != -1)
{
// Greedy descent through the levels above the element's own top level.
if (curlevel < maxlevelcopy)
{
dist_t curdist = fstdistfunc_(data_point, getDataByInternalId(currObj), dist_func_param_);
for (int level = maxlevelcopy; level > curlevel; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
std::unique_lock<std::mutex> lock(link_list_locks_[currObj]);
data = get_linklist(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
if (cand < 0 || cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(data_point, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
}
// A deleted entry point is invisible to searchBaseLayer; re-add it so the
// graph stays connected through it.
bool epDeleted = isMarkedDeleted(enterpoint_copy);
for (int level = std::min(curlevel, maxlevelcopy); level >= 0; level--)
{
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = searchBaseLayer(
currObj, data_point, level);
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(enterpoint_copy), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
currObj = mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false);
}
}
else
{
// Do nothing for the first element
enterpoint_node_ = 0;
maxlevel_ = curlevel;
}
//Releasing lock for the maximum level
if (curlevel > maxlevelcopy)
{
enterpoint_node_ = cur_c;
maxlevel_ = curlevel;
}
return cur_c;
};
tableint multi_layer0_addPoint(const void *data_point, labeltype label, int level, float *down_curlevel, float *other_curlevel)
{
tableint cur_c = 0;
{
// Checking if the element with the same label already exists
// if so, updating it *instead* of creating a new element.
std::unique_lock<std::mutex> templock_curr(cur_element_count_guard_);
auto search = label_lookup_.find(label);
if (search != label_lookup_.end())
{
tableint existingInternalId = search->second;
templock_curr.unlock();
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(existingInternalId & (max_update_element_locks - 1))]);
updatePoint(data_point, existingInternalId, 1.0);
return existingInternalId;
}
if (cur_element_count >= max_elements_)
{
throw std::runtime_error("The number of elements exceeds the specified limit");
};
cur_c = cur_element_count;
cur_element_count++;
label_lookup_[label] = cur_c;
}
// Take update lock to prevent race conditions on an element with insertion/update at the same time.
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(cur_c & (max_update_element_locks - 1))]);
std::unique_lock<std::mutex> lock_el(link_list_locks_[cur_c]);
int curlevel = getRandomLevel(mult_);
//int curlevel;
if (level > 0)
curlevel = level;
element_levels_[cur_c] = curlevel;
std::unique_lock<std::mutex> templock(global);
int maxlevelcopy = maxlevel_;
if (curlevel <= maxlevelcopy)
templock.unlock();
tableint currObj = enterpoint_node_;
tableint enterpoint_copy = enterpoint_node_;
memset(data_level0_memory_ + cur_c * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(cur_c), &label, sizeof(labeltype));
memcpy(getDataByInternalId(cur_c), data_point, data_size_);
for (int i = 0; i < num_layer; i++)
{
memset(data_level0_memory_multi_layer[i] + cur_c * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(cur_c, data_level0_memory_multi_layer[i]), &label, sizeof(labeltype));
memcpy(getDataByInternalId(cur_c, data_level0_memory_multi_layer[i]), data_point, data_size_);
}
if (curlevel)
{
linkLists_[cur_c] = (char *)malloc(size_links_per_element_ * curlevel + 1);
if (linkLists_[cur_c] == nullptr)
throw std::runtime_error("Not enough memory: addPoint failed to allocate linklist");
memset(linkLists_[cur_c], 0, size_links_per_element_ * curlevel + 1);
}
if ((signed)currObj != -1)
{
StopH stop_l = StopH();
float up_curlevel = 0;
//float down_curlevel = 0;
//float other_curlevel = 0;
stop_l.reset();
if (curlevel < maxlevelcopy)
{
dist_t curdist = fstdistfunc_(data_point, getDataByInternalId(currObj), dist_func_param_);
for (int level = maxlevelcopy; level > curlevel; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
std::unique_lock<std::mutex> lock(link_list_locks_[currObj]);
data = get_linklist(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
if (cand < 0 || cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(data_point, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
}
up_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
stop_l.reset();
bool epDeleted = isMarkedDeleted(enterpoint_copy);
for (int level = std::min(curlevel, maxlevelcopy); level > 0; level--)
{
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = searchBaseLayer(
currObj, data_point, level);
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(enterpoint_copy), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
currObj = mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false);
}
//down_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
stop_l.reset();
char *data_layer0;
int level = 0;
if (curlevel == 0)
{
int i = rand() % (num_layer + 1);
if (i == 0)
data_layer0 = data_level0_memory_;
else
data_layer0 = data_level0_memory_multi_layer[i - 1];
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
stop_l.reset();
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = multi_layer_searchBaseLayer(
currObj, data_point, level, data_layer0);
*other_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(enterpoint_copy, data_layer0), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
//printf("other_curlevel1 = %f ms\n", other_curlevel);
stop_l.reset();
currObj = multi_layer_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false, data_layer0);
*down_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
// if(label == (1000000-10)){
// printf("other_curlevel1 = %f ms\n", *other_curlevel);
// printf("down_curlevel1 = %f ms\n", *down_curlevel);
// exit(1);
// }
}
else
{
int vertex;
for (int i = 0; i <= num_layer; i++)
{
vertex = currObj;
if (i == 0)
data_layer0 = data_level0_memory_;
else
data_layer0 = data_level0_memory_multi_layer[i - 1];
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
stop_l.reset();
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = multi_layer_searchBaseLayer(
vertex, data_point, level, data_layer0);
*other_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(enterpoint_copy, data_layer0), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
//other_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
//printf("other_curlevel = %f ms\n", other_curlevel);
stop_l.reset();
vertex = multi_layer_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false, data_layer0);
*down_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
//printf("down_curlevel = %f ms\n", down_curlevel);
//exit(1);
}
}
//other_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
//printf("other_curlevel = %f ms\n", other_curlevel);
//exit(1);
//printf("2_1\n");
}
else
{
// Do nothing for the first element
enterpoint_node_ = 0;
maxlevel_ = curlevel;
}
//Releasing lock for the maximum level
if (curlevel > maxlevelcopy)
{
enterpoint_node_ = cur_c;
maxlevel_ = curlevel;
}
return cur_c;
};
// Insert (or update) a point into the multi-layer index using *remapped* storage:
// mapping_id[x] gives the physical storage slot for internal id x, and
// mapping_layer[x] selects which level-0 memory region holds it
// (0 = data_level0_memory_, i > 0 = data_level0_memory_multi_layer[i - 1]).
// down_curlevel / other_curlevel accumulate per-phase timings (milliseconds).
// Returns the internal id of the inserted (or updated) element.
// NOTE(review): the printf "%d" calls below assume labeltype / vector<int>
// elements fit an int — if labeltype is size_t this is a format mismatch; confirm.
tableint multi_layer0_addPoint_memory(const void *data_point, labeltype label, int level, float *down_curlevel, float *other_curlevel, std::vector<int> mapping_layer, std::vector<int> mapping_id)
{
tableint cur_c = 0;
// Debug tracing of the incoming label and its storage mapping.
printf("label = %d\n", label);
printf("mapping_id = %d\n", mapping_id[label]);
printf("mapping_layer = %d\n", mapping_layer[label]);
{
// Checking if the element with the same label already exists
// if so, updating it *instead* of creating a new element.
std::unique_lock<std::mutex> templock_curr(cur_element_count_guard_);
auto search = label_lookup_.find(label);
if (search != label_lookup_.end())
{
tableint existingInternalId = search->second;
templock_curr.unlock();
// Per-element update lock (striped over max_update_element_locks slots).
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(existingInternalId & (max_update_element_locks - 1))]);
updatePoint(data_point, existingInternalId, 1.0);
return existingInternalId;
}
//printf("h");
if (cur_element_count >= max_elements_)
{
throw std::runtime_error("The number of elements exceeds the specified limit");
};
// Claim the next internal id while still holding the count guard.
cur_c = cur_element_count;
cur_element_count++;
label_lookup_[label] = cur_c;
}
// Take update lock to prevent race conditions on an element with insertion/update at the same time.
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(cur_c & (max_update_element_locks - 1))]);
std::unique_lock<std::mutex> lock_el(link_list_locks_[cur_c]);
//int curlevel = getRandomLevel(mult_);
// NOTE(review): curlevel stays uninitialized (UB) if level < 0; all callers
// presumably pass an explicit level >= 0 here — TODO confirm.
int curlevel;
if (level >= 0)
curlevel = level;
element_levels_[cur_c] = curlevel;
printf("level = %d\n", curlevel);
// Hold the global lock only while this insertion may raise the max level.
std::unique_lock<std::mutex> templock(global);
int maxlevelcopy = maxlevel_;
if (curlevel <= maxlevelcopy)
templock.unlock();
tableint currObj = enterpoint_node_;
tableint enterpoint_copy = enterpoint_node_;
if (curlevel)
{
// Upper-level element: allocate its link list and replicate the raw
// vector + label into *every* level-0 memory region.
printf("1\n");
linkLists_[cur_c] = (char *)malloc(size_links_per_element_ * curlevel + 1);
if (linkLists_[cur_c] == nullptr)
throw std::runtime_error("Not enough memory: addPoint failed to allocate linklist");
memset(linkLists_[cur_c], 0, size_links_per_element_ * curlevel + 1);
memset(data_level0_memory_ + mapping_id[cur_c] * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(mapping_id[cur_c]), &label, sizeof(labeltype));
memcpy(getDataByInternalId(mapping_id[cur_c]), data_point, data_size_);
for (int i = 0; i < num_layer; i++)
{
memset(data_level0_memory_multi_layer[i] + mapping_id[cur_c] * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(mapping_id[cur_c], data_level0_memory_multi_layer[i]), &label, sizeof(labeltype));
memcpy(getDataByInternalId(mapping_id[cur_c], data_level0_memory_multi_layer[i]), data_point, data_size_);
}
}
else
{
// Level-0 element: write the data only into its assigned memory region.
if (mapping_layer[cur_c] == 0)
{
memset(data_level0_memory_ + mapping_id[cur_c] * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(mapping_id[cur_c]), &label, sizeof(labeltype));
memcpy(getDataByInternalId(mapping_id[cur_c]), data_point, data_size_);
}
else
{
memset(data_level0_memory_multi_layer[mapping_layer[cur_c] - 1] + mapping_id[cur_c] * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(mapping_id[cur_c], data_level0_memory_multi_layer[mapping_layer[cur_c] - 1]), &label, sizeof(labeltype));
memcpy(getDataByInternalId(mapping_id[cur_c], data_level0_memory_multi_layer[mapping_layer[cur_c] - 1]), data_point, data_size_);
}
}
// currObj == -1 (as signed) is the "index is empty" sentinel.
if ((signed)currObj != -1)
{
// 3.16 Hu test
StopH stop_l = StopH();
float up_curlevel = 0;
//float down_curlevel = 0;
//float other_curlevel = 0;
stop_l.reset();
// Phase 1: greedy descent through the levels above curlevel,
// following the neighbour that is closest to the new point.
if (curlevel < maxlevelcopy)
{
dist_t curdist = fstdistfunc_(data_point, getDataByInternalId(mapping_id[currObj]), dist_func_param_);
for (int level = maxlevelcopy; level > curlevel; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
std::unique_lock<std::mutex> lock(link_list_locks_[currObj]);
data = get_linklist(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
if (cand < 0 || cand > max_elements_)
throw std::runtime_error("cand error");
// Distances are computed against the remapped storage slot.
dist_t d = fstdistfunc_(data_point, getDataByInternalId(mapping_id[cand]), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
}
up_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
stop_l.reset();
bool epDeleted = isMarkedDeleted(enterpoint_copy);
// Phase 2: for each level from min(curlevel, maxlevel) down to 1,
// search the layer and wire the new element into it.
for (int level = std::min(curlevel, maxlevelcopy); level > 0; level--)
{
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = dmd_hnsw_searchBaseLayer(
currObj, data_point, level, mapping_id);
if (epDeleted)
{
// Keep a deleted entry point reachable as a candidate.
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(mapping_id[enterpoint_copy]), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
currObj = dmd_hnsw_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false, mapping_id);
}
//down_curlevel = stop_l.getElapsedTimeMicro() / 1e3;
//printf("down_curlevel = %f ms\n", down_curlevel);
stop_l.reset();
char *data_layer0;
// Phase 3: level 0. This local 'level' shadows the 'level' parameter on purpose.
int level = 0;
if (curlevel == 0)
{
// Level-0-only element: connect it inside its single assigned region.
int i = mapping_layer[cur_c];
if (i == 0)
data_layer0 = data_level0_memory_;
else
data_layer0 = data_level0_memory_multi_layer[i - 1];
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
stop_l.reset();
printf("level1 = %d\n", curlevel);
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = dmd_hnsw_multi_layer_searchBaseLayer(
currObj, data_point, level, data_layer0, mapping_id);
printf("level2 = %d\n", curlevel);
*other_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(mapping_id[enterpoint_copy], data_layer0), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
stop_l.reset();
currObj = dmd_hnsw_multi_layer_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false, data_layer0, mapping_id);
*down_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
printf("level = %d\n", curlevel);
}
else
{
// Upper-level element: connect it into every level-0 region, always
// restarting from the entry point reached by phase 2 (currObj).
int vertex;
for (int i = 0; i <= num_layer; i++)
{
vertex = currObj;
if (i == 0)
data_layer0 = data_level0_memory_;
else
data_layer0 = data_level0_memory_multi_layer[i - 1];
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
stop_l.reset();
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = dmd_hnsw_multi_layer_searchBaseLayer(
vertex, data_point, level, data_layer0, mapping_id);
*other_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
if (epDeleted)
{
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(mapping_id[enterpoint_copy], data_layer0), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
stop_l.reset();
vertex = dmd_hnsw_multi_layer_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false, data_layer0, mapping_id);
*down_curlevel += stop_l.getElapsedTimeMicro() / 1e3;
}
}
}
else
{
// Do nothing for the first element
enterpoint_node_ = 0;
maxlevel_ = curlevel;
}
//Releasing lock for the maximum level
// If this element raised the maximum level, it becomes the new entry point
// (the global lock is still held on this path).
if (curlevel > maxlevelcopy)
{
enterpoint_node_ = cur_c;
maxlevel_ = curlevel;
}
return cur_c;
};
// Insert (or update) a point, variant intended for parallel/batched build.
// level == -1 means "not specified": keep the random level from getRandomLevel.
// vec_start is forwarded to parallel_searchBaseLayer (a commented-out filter
// below suggests it once restricted candidates to ids < vec_start — confirm).
// Unlike the multi-layer insert paths above, level 0 is handled inside the
// main connect loop (it runs down to level >= 0) via parallel_searchBaseLayer
// and batch_mutuallyConnectNewElement.
// Returns the internal id of the inserted (or updated) element.
tableint parallel_addPoint(const void *data_point, labeltype label, int level, int vec_start)
{
tableint cur_c = 0;
{
// Checking if the element with the same label already exists
// if so, updating it *instead* of creating a new element.
std::unique_lock<std::mutex> templock_curr(cur_element_count_guard_);
auto search = label_lookup_.find(label);
if (search != label_lookup_.end())
{
tableint existingInternalId = search->second;
templock_curr.unlock();
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(existingInternalId & (max_update_element_locks - 1))]);
updatePoint(data_point, existingInternalId, 1.0);
return existingInternalId;
}
if (cur_element_count >= max_elements_)
{
throw std::runtime_error("The number of elements exceeds the specified limit");
};
// Claim the next internal id while still holding the count guard.
cur_c = cur_element_count;
cur_element_count++;
label_lookup_[label] = cur_c;
}
// Take update lock to prevent race conditions on an element with insertion/update at the same time.
std::unique_lock<std::mutex> lock_el_update(link_list_update_locks_[(cur_c & (max_update_element_locks - 1))]);
std::unique_lock<std::mutex> lock_el(link_list_locks_[cur_c]);
int curlevel = getRandomLevel(mult_);
if (level > 0) // level == -1 means "not specified": keep the random level
curlevel = level;
element_levels_[cur_c] = curlevel;
// Hold the global lock only while this insertion may raise the max level.
std::unique_lock<std::mutex> templock(global);
int maxlevelcopy = maxlevel_;
if (curlevel <= maxlevelcopy)
templock.unlock();
tableint currObj = enterpoint_node_;
tableint enterpoint_copy = enterpoint_node_;
memset(data_level0_memory_ + cur_c * size_data_per_element_ + offsetLevel0_, 0, size_data_per_element_);
// Initialisation of the data and label
memcpy(getExternalLabeLp(cur_c), &label, sizeof(labeltype));
memcpy(getDataByInternalId(cur_c), data_point, data_size_);
if (curlevel)
{
// Allocate the (zeroed) upper-level link lists for this element.
linkLists_[cur_c] = (char *)malloc(size_links_per_element_ * curlevel + 1);
if (linkLists_[cur_c] == nullptr)
throw std::runtime_error("Not enough memory: addPoint failed to allocate linklist");
memset(linkLists_[cur_c], 0, size_links_per_element_ * curlevel + 1);
}
// currObj == -1 (as signed) is the "index is empty" sentinel.
if ((signed)currObj != -1)
{
// Phase 1: greedy descent through the levels above curlevel.
if (curlevel < maxlevelcopy)
{
dist_t curdist = fstdistfunc_(data_point, getDataByInternalId(currObj), dist_func_param_);
for (int level = maxlevelcopy; level > curlevel; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
std::unique_lock<std::mutex> lock(link_list_locks_[currObj]);
data = get_linklist(currObj, level);
int size = getListCount(data);
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
if (cand < 0 || cand > max_elements_)
throw std::runtime_error("cand error");
//if (cand < vec_start)
//{
dist_t d = fstdistfunc_(data_point, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
//}
}
}
}
}
bool epDeleted = isMarkedDeleted(enterpoint_copy);
// Phase 2: search and connect on every level down to and including 0.
for (int level = std::min(curlevel, maxlevelcopy); level >= 0; level--)
{
if (level > maxlevelcopy || level < 0) // possible?
throw std::runtime_error("Level error");
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates = parallel_searchBaseLayer(
currObj, data_point, level, vec_start);
if (epDeleted)
{
// Keep a deleted entry point reachable as a candidate.
top_candidates.emplace(fstdistfunc_(data_point, getDataByInternalId(enterpoint_copy), dist_func_param_), enterpoint_copy);
if (top_candidates.size() > ef_construction_)
top_candidates.pop();
}
currObj = batch_mutuallyConnectNewElement(data_point, cur_c, top_candidates, level, false);
}
}
else
{
// Do nothing for the first element
enterpoint_node_ = 0;
maxlevel_ = curlevel;
}
//Releasing lock for the maximum level
// If this element raised the maximum level, it becomes the new entry point.
if (curlevel > maxlevelcopy)
{
enterpoint_node_ = cur_c;
maxlevel_ = curlevel;
}
return cur_c;
};
// k-nearest-neighbour query over the multi-layer index.
// 1) Greedy descent from the entry point through all levels > 0.
// 2) Search all num_layer + 1 level-0 memory regions in parallel.
// 3) Merge the per-region results, de-duplicating internal ids, and keep the
//    best k.
// Returns a max-heap of (distance, external label) pairs.
// NOTE(review): metric_hops / metric_distance_computations are mutated in a
// const method — presumably declared mutable; confirm, and note they are not
// thread-safe counters.
std::priority_queue<std::pair<dist_t, labeltype>>
searchKnn(const void *query_data, size_t k) const
{
std::priority_queue<std::pair<dist_t, labeltype>> result;
if (cur_element_count == 0)
return result;
tableint currObj = enterpoint_node_;
dist_t curdist = fstdistfunc_(query_data, getDataByInternalId(enterpoint_node_), dist_func_param_);
// Greedy descent: at each level move to the closest neighbour until stable.
for (int level = maxlevel_; level > 0; level--)
{
bool changed = true;
while (changed)
{
changed = false;
unsigned int *data;
data = (unsigned int *)get_linklist(currObj, level);
int size = getListCount(data);
metric_hops++;
metric_distance_computations += size;
tableint *datal = (tableint *)(data + 1);
for (int i = 0; i < size; i++)
{
tableint cand = datal[i];
if (cand < 0 || cand > max_elements_)
throw std::runtime_error("cand error");
dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
if (d < curdist)
{
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
std::vector<std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>> multi_layer_top_candidates(num_layer + 1);
// Dedup flags, one per possible internal id.
// NOTE(review): this allocates O(max_elements_) per query — costly for large indexes.
std::vector<int> element_flag = std::vector<int>(max_elements_);
// NOTE(review): thread count is hard-coded to 3 here, while test_searchKnn
// uses num_threads(num_layer + 1) — confirm which is intended.
#pragma omp parallel for num_threads(3)
for (int i = 0; i <= num_layer; i++)
{
//printf("2");
//int i = rand() % 3;
// Region 0 is the primary level-0 memory; region i > 0 is the i-th extra layer.
char *data_layer0;
//std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> multi_layer_top_candidates;
if (i == 0)
data_layer0 = data_level0_memory_;
else
data_layer0 = data_level0_memory_multi_layer[i - 1];
if (has_deletions_)
{
//top_candidates = searchBaseLayerST<true, true>(
//currObj, query_data, std::max(ef_, k));
multi_layer_top_candidates[i] = multi_layer_searchBaseLayerST<true, true>(
currObj, query_data, std::max(ef_, k), data_layer0);
}
else
{
//top_candidates = searchBaseLayerST<false, true>(
//currObj, query_data, std::max(ef_, k));
multi_layer_top_candidates[i] = multi_layer_searchBaseLayerST<false, true>(
currObj, query_data, std::max(ef_, k), data_layer0);
}
}
// Sequential merge: first occurrence of each internal id wins; cap size at k.
for (int i = 0; i <= num_layer; i++)
{
while (multi_layer_top_candidates[i].size() > 0)
{
if (element_flag[multi_layer_top_candidates[i].top().second] != 1)
{
top_candidates.emplace(multi_layer_top_candidates[i].top().first, multi_layer_top_candidates[i].top().second);
element_flag[multi_layer_top_candidates[i].top().second] = 1;
if (top_candidates.size() > k)
{
top_candidates.pop();
}
}
multi_layer_top_candidates[i].pop();
}
}
// Convert internal ids back to external labels for the caller.
while (top_candidates.size() > 0)
{
std::pair<dist_t, tableint> rez = top_candidates.top();
result.push(std::pair<dist_t, labeltype>(rez.first, getExternalLabel(rez.second)));
top_candidates.pop();
}
return result;
};
// Debug variant of searchKnn(): performs the same greedy descent and level-0
// search (restricted to region 0), logging every distance-improvement step to
// "test.txt". Returns a max-heap of (distance, external label) pairs.
// Fixes vs. the previous revision:
//  - fopen() result is checked before use (was dereferenced unchecked);
//  - the FILE* is closed on the empty-index early return (was leaked);
//  - step distances are logged with "%f" via a double cast (was "%d" on a
//    dist_t, undefined behavior for floating-point dist_t);
//  - the no-deletions branch uses <false, true>, mirroring searchKnn()
//    (both branches previously used <true, true>).
std::priority_queue<std::pair<dist_t, labeltype>>
test_searchKnn(const void *query_data, size_t k) const
{
    int x = 0;
    int *step = &x; // step counter shared with the traced base-layer search
    FILE *fp = fopen("test.txt", "w+");
    if (fp == NULL)
        throw std::runtime_error("test_searchKnn: failed to open test.txt");
    fprintf(fp, "This is a test!\n");
    std::priority_queue<std::pair<dist_t, labeltype>> result;
    if (cur_element_count == 0)
    {
        fclose(fp); // do not leak the trace file on the empty-index path
        return result;
    }
    tableint currObj = enterpoint_node_;
    dist_t curdist = fstdistfunc_(query_data, getDataByInternalId(enterpoint_node_), dist_func_param_);
    (*step)++;
    fprintf(fp, "step%d: %f\n", *step, (double)curdist);
    // Greedy descent: at each level move to the closest neighbour until stable,
    // logging each improvement.
    for (int level = maxlevel_; level > 0; level--)
    {
        bool changed = true;
        while (changed)
        {
            changed = false;
            unsigned int *data;
            data = (unsigned int *)get_linklist(currObj, level);
            int size = getListCount(data);
            metric_hops++;
            metric_distance_computations += size;
            tableint *datal = (tableint *)(data + 1);
            for (int i = 0; i < size; i++)
            {
                tableint cand = datal[i];
                if (cand < 0 || cand > max_elements_)
                    throw std::runtime_error("cand error");
                dist_t d = fstdistfunc_(query_data, getDataByInternalId(cand), dist_func_param_);
                if (d < curdist)
                {
                    curdist = d;
                    (*step)++;
                    fprintf(fp, "step%d: %f\n", *step, (double)curdist);
                    currObj = cand;
                    changed = true;
                }
            }
        }
    }
    std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst> top_candidates;
    std::vector<std::priority_queue<std::pair<dist_t, tableint>, std::vector<std::pair<dist_t, tableint>>, CompareByFirst>> multi_layer_top_candidates(num_layer + 1);
    std::vector<int> element_flag = std::vector<int>(max_elements_);
#pragma omp parallel for num_threads(num_layer + 1)
    // NOTE: intentionally restricted to region 0 (i <= 0) for tracing.
    for (int i = 0; i <= 0; i++)
    {
        char *data_layer0;
        if (i == 0)
            data_layer0 = data_level0_memory_;
        else
            data_layer0 = data_level0_memory_multi_layer[i - 1];
        if (has_deletions_)
        {
            multi_layer_top_candidates[i] = test_multi_layer_searchBaseLayerST<true, true>(
                currObj, query_data, std::max(ef_, k), data_layer0, step, fp);
        }
        else
        {
            // Mirrors searchKnn(): no deleted elements -> <false, true>.
            multi_layer_top_candidates[i] = test_multi_layer_searchBaseLayerST<false, true>(
                currObj, query_data, std::max(ef_, k), data_layer0, step, fp);
        }
    }
    // Merge the per-region heaps (only region 0 is populated here), keeping k.
    for (int i = 0; i <= num_layer; i++)
    {
        while (multi_layer_top_candidates[i].size() > 0)
        {
            //if (element_flag[multi_layer_top_candidates[i].top().second] != 1)
            //{
            top_candidates.emplace(multi_layer_top_candidates[i].top().first, multi_layer_top_candidates[i].top().second);
            element_flag[multi_layer_top_candidates[i].top().second] = 1;
            if (top_candidates.size() > k)
            {
                top_candidates.pop();
            }
            //}
            multi_layer_top_candidates[i].pop();
        }
    }
    // Convert internal ids back to external labels for the caller.
    while (top_candidates.size() > 0)
    {
        std::pair<dist_t, tableint> rez = top_candidates.top();
        result.push(std::pair<dist_t, labeltype>(rez.first, getExternalLabel(rez.second)));
        top_candidates.pop();
    }
    fclose(fp);
    return result;
};
template <typename Comp>
std::vector<std::pair<dist_t, labeltype>>
searchKnn(const void *query_data, size_t k, Comp comp)
{
std::vector<std::pair<dist_t, labeltype>> result;
if (cur_element_count == 0)
return result;
auto ret = searchKnn(query_data, k);
while (!ret.empty())
{
result.push_back(ret.top());
ret.pop();
}
std::sort(result.begin(), result.end(), comp);
return result;
}
void checkIntegrity()
{
int connections_checked = 0;
std::vector<int> inbound_connections_num(cur_element_count, 0);
for (int i = 0; i < cur_element_count; i++)
{
for (int l = 0; l <= element_levels_[i]; l++)
{
linklistsizeint *ll_cur = get_linklist_at_level(i, l);
int size = getListCount(ll_cur);
tableint *data = (tableint *)(ll_cur + 1);
std::unordered_set<tableint> s;
for (int j = 0; j < size; j++)
{
assert(data[j] > 0);
assert(data[j] < cur_element_count);
assert(data[j] != i);
inbound_connections_num[data[j]]++;
s.insert(data[j]);
connections_checked++;
}
assert(s.size() == size);
}
}
if (cur_element_count > 1)
{
int min1 = inbound_connections_num[0], max1 = inbound_connections_num[0];
for (int i = 0; i < cur_element_count; i++)
{
assert(inbound_connections_num[i] > 0);
min1 = std::min(inbound_connections_num[i], min1);
max1 = std::max(inbound_connections_num[i], max1);
}
std::cout << "Min inbound: " << min1 << ", Max inbound:" << max1 << "\n";
}
std::cout << "integrity ok, checked " << connections_checked << " connections\n";
}
};
}
|
GB_unaryop__lnot_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_uint16
// op(A') function: GB_tran__lnot_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = lnot (cast (Ax [p])) for p in [0, anz): apply the logical-not
   operator elementwise, casting each uint16_t entry of Ax to float first
   (cij = !(aij != 0)). The loop is parallelised statically over nthreads.
   Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
   GrB_SUCCESS otherwise. (Auto-generated file: edit the generator, not this.) */
GrB_Info GB_unop__lnot_fp32_uint16
(
float *restrict Cx,          /* output array, anz entries */
const uint16_t *restrict Ax, /* input array, anz entries */
int64_t anz,                 /* number of entries to process */
int nthreads                 /* number of OpenMP threads to use */
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose A, casting uint16_t to float and applying
   the logical-not operator. The actual loop lives in the shared template
   GB_unaryop_transpose.c, specialised here via the GB_* macros above.
   Returns GrB_NO_VALUE when compiled out via GB_DISABLE, GrB_SUCCESS
   otherwise. (Auto-generated file: edit the generator, not this.) */
GrB_Info GB_tran__lnot_fp32_uint16
(
GrB_Matrix C,                    /* output matrix */
const GrB_Matrix A,              /* input matrix to transpose */
int64_t **Rowcounts,             /* per-slice row counts (template workspace) */
GBI_single_iterator Iter,        /* iterator over A's vectors */
const int64_t *restrict A_slice, /* slice boundaries for parallelism */
int naslice                      /* number of slices */
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hash_mult_hw.h | #ifndef _HASH_MULT_HW_
#define _HASH_MULT_HW_
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// #include <immintrin.h>
//#include <zmmintrin.h>
#include <algorithm>
#include "utility.h"
#include "CSR.h"
#include "BIN.h"
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // Set disjoint number to SH_SIZE
#define IMB_PWMIN 8
#define B_PWMIN 8
#define VEC_LENGTH 8
#define VEC_LENGTH_BIT 3
#define VEC_LENGTH_LONG 4
#define VEC_LENGTH_LONG_BIT 2
/*
 * Symbolic phase of C = A * B: for each row of C, count the number of
 * distinct column indices that the multiplication will produce, using a
 * thread-local open-addressing hash table with linear probing. The per-row
 * counts are written to bin.row_nz; rows are statically partitioned across
 * threads via bin.rows_offset. Rows with bin_id <= 0 get a count of zero.
 */
template <class IT, class NT>
inline void hash_symbolic_kernel(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, BIN<IT, NT> &bin, unsigned threadCount)
{
#pragma omp parallel num_threads(threadCount)
    {
        const int tid = omp_get_thread_num();
        const IT row_begin = bin.rows_offset[tid];
        const IT row_end = bin.rows_offset[tid + 1];
        IT *table = bin.local_hash_table_id[tid]; // this thread's hash slots

        for (IT row = row_begin; row < row_end; ++row) {
            IT distinct = 0;
            const IT bid = bin.bin_id[row];
            if (bid > 0) {
                // Table size is a power of two, so "& (size - 1)" == "% size".
                const IT table_size = IMB_PWMIN << (bid - 1);
                for (IT slot = 0; slot < table_size; ++slot) {
                    table[slot] = -1; // -1 marks an empty slot
                }
                // Expand row 'row' of A against the matching rows of B.
                for (IT ja = arpt[row]; ja < arpt[row + 1]; ++ja) {
                    const IT a_col = acol[ja];
                    for (IT jb = brpt[a_col]; jb < brpt[a_col + 1]; ++jb) {
                        const IT key = bcol[jb];
                        IT probe = (key * HASH_SCAL) & (table_size - 1);
                        for (;;) {
                            if (table[probe] == key) {
                                break; // column already counted
                            }
                            if (table[probe] == -1) {
                                table[probe] = key; // first occurrence
                                distinct++;
                                break;
                            }
                            // Collision: linear probe to the next slot.
                            probe = (probe + 1) & (table_size - 1);
                        }
                    }
                }
            }
            bin.row_nz[row] = distinct;
        }
    }
}
/*
 * Symbolic phase driver for C = A * B: computes bin.row_nz via the parallel
 * kernel, prefix-sums the counts into C's row pointer array crpt, and
 * reports the total number of nonzeros of C through *nnz.
 * (Fixed: removed an unused local variable `IT i;`.)
 */
template <class IT, class NT>
inline void hash_symbolic(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, IT *crpt, BIN<IT, NT> &bin, const IT nrow, IT *nnz, unsigned threadCount)
{
    hash_symbolic_kernel(arpt, acol, brpt, bcol, bin, threadCount);
    /* Set row pointer of matrix C: exclusive prefix sum over the row counts */
    scan(bin.row_nz, crpt, nrow + 1);
    *nnz = crpt[nrow];
}
template <typename IT, typename NT>
bool sort_less(const pair<IT, NT> &left,const pair<IT, NT> &right)
{
return left.first < right.first;
}
/*
 * Numeric phase of C = A * B: for each row, accumulate products into a
 * thread-local open-addressing hash table (linear probing, same layout as
 * the symbolic phase), then scatter the accumulated (column, value) pairs
 * into ccol/cval at the offsets precomputed in crpt. When sortOutput is
 * true the entries of each row are emitted in ascending column order;
 * otherwise they are emitted in hash-table order. multop combines a pair of
 * input values; addop combines colliding products for the same column.
 */
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric(const IT *arpt, const IT *acol, const NT *aval, const IT *brpt, const IT *bcol, const NT *bval, const IT *crpt, IT *ccol, NT *cval,const BIN<IT, NT> &bin, const MultiplyOperation multop, const AddOperation addop, unsigned threadCount)
{
#pragma omp parallel num_threads(threadCount)
{
IT i, tid, start_row, end_row;
IT *shared_check;
NT *shared_value;
/* Rows are statically partitioned across threads via bin.rows_offset. */
tid = omp_get_thread_num();
start_row = bin.rows_offset[tid];
end_row = bin.rows_offset[tid + 1];
/* Thread-local hash table: column keys and accumulated values. */
shared_check = bin.local_hash_table_id[tid];
shared_value = bin.local_hash_table_val[tid];
for (i = start_row; i < end_row; ++i) {
IT j, k, bid, index;
IT SH_ROW;
IT t_acol, hash, key, offset;
NT t_aval, t_val;
bid = bin.bin_id[i];
if (bid > 0) {
/* offset: where this row's output starts in ccol/cval. */
offset = crpt[i];
/* Table size is a power of two, so "& (SH_ROW - 1)" == "% SH_ROW". */
SH_ROW = B_PWMIN << (bid - 1);
for (j = 0; j < SH_ROW; ++j) {
shared_check[j] = -1; /* -1 marks an empty slot */
}
/* Expand row i of A against the matching rows of B. */
for (j = arpt[i]; j < arpt[i + 1]; ++j) {
t_acol = acol[j];
t_aval = aval[j];
for (k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
t_val = multop(t_aval, bval[k]);
key = bcol[k];
hash = (key * HASH_SCAL) & (SH_ROW - 1);
while (1) {
if (shared_check[hash] == key) {
/* Same column seen before: accumulate. */
shared_value[hash] = addop(t_val, shared_value[hash]);
break;
}
else if (shared_check[hash] == -1) {
/* Empty slot: insert the first product for this column. */
shared_check[hash] = key;
shared_value[hash] = t_val;
break;
}
else {
/* Collision: linear probe to the next slot. */
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
index = 0;
if (sortOutput) {
/* Gather occupied slots, sort by column index, then write out. */
IT nz = crpt[i + 1] - offset;
vector<pair<IT, NT>> p_vec(nz);
for (j = 0; j < SH_ROW; ++j) {
if (shared_check[j] != -1) {
p_vec[index++] = make_pair(shared_check[j], shared_value[j]);
}
}
sort(p_vec.begin(), p_vec.end(), sort_less<IT, NT>);
for (j = 0; j < index; ++j) {
ccol[offset + j] = p_vec[j].first;
cval[offset + j] = p_vec[j].second;
}
}
else {
/* Unsorted output: write occupied slots in table order. */
for (j = 0; j < SH_ROW; ++j) {
if (shared_check[j] != -1) {
ccol[offset + index] = shared_check[j];
cval[offset + index] = shared_value[j];
index++;
}
}
}
}
}
}
}
/*
 * Hash-based sparse general matrix-matrix multiplication: C = A * B with
 * multop as the multiply and addop as the accumulate operator, using
 * threadCount OpenMP threads. Allocates and fills c.rowptr / c.colids /
 * c.values; when sortOutput is true each output row is sorted by column.
 */
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop, unsigned threadCount)
{
/* Bin rows by expected work so threads get balanced partitions. */
BIN<IT, NT> bin(a.rows, IMB_PWMIN, threadCount);
c.rows = a.rows;
c.cols = b.cols;
c.zerobased = true;
/* Set max bin */
bin.set_max_bin(a.rowptr, a.colids, b.rowptr, c.rows, c.cols);
/* Create hash table (thread local) */
bin.create_local_hash_table(c.cols);
/* Symbolic Phase: count nonzeros per row and build c.rowptr */
c.rowptr = my_malloc<IT>(c.rows + 1);
hash_symbolic(a.rowptr, a.colids, b.rowptr, b.colids, c.rowptr, bin, c.rows, &(c.nnz), threadCount);
/* Numeric Phase: allocate and fill the column/value arrays */
c.colids = my_malloc<IT>(c.nnz);
c.values = my_malloc<NT>(c.nnz);
// only non-vector case implemented
hash_numeric<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop, threadCount);
}
#endif
|
DRB090-static-local-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
For a variable declared in a scope inside an OpenMP construct:
* private if the variable has an automatic storage duration
* shared if the variable has a static storage duration.
Dependence pairs:
tmp@73:5 vs. tmp@73:5
tmp@73:5 vs. tmp@74:12
*/
#include<stdio.h>
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[len], b[len];

  /* Seed both arrays identically: a[i] == b[i] == i. */
  for (i=0;i<len;i++)
  { a[i]=i; b[i]=i;}

  /* Static storage duration for a local variable: `tmp` is ONE object
     shared by every thread in the team, so the unsynchronized write in
     one iteration can race with the read/write in another.  This race is
     intentional -- it is the defect this DataRaceBench case exhibits
     (see the tmp@73:5 dependence pairs in the file header). */
#pragma omp parallel
  {
    static int tmp;
#pragma omp for
    for (i=0;i<len;i++)
    {
      tmp = a[i]+i;
      a[i] = tmp;
    }
  }

  /* Automatic storage duration: each thread's `tmp` is a distinct object
     (private per the OpenMP data-sharing rules quoted in the header), so
     this second loop is race-free and b[] is updated deterministically. */
#pragma omp parallel
  {
    int tmp;
#pragma omp for
    for (i=0;i<len;i++)
    {
      tmp = b[i]+i;
      b[i] = tmp;
    }
  }
  /* a[50] may or may not equal 100 depending on the race; b[50] is 100. */
  printf("a[50]=%d b[50]=%d\n", a[50], b[50]);
  return 0;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g. X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    n;

  /*
    Allocate (or grow) the image colormap and seed it with a linear gray
    ramp.  A request for zero colors is clamped to a single entry.  On
    allocation failure the image falls back to DirectClass with no
    colormap and MagickFalse is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->colors=MagickMax(colors,1);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      /*
        Out of memory: leave the image usable as DirectClass.
      */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  for (n=0; n < (ssize_t) image->colors; n++)
  {
    double
      gray;

    /*
      Evenly spaced opaque gray levels from 0 to QuantumRange.
    */
    GetPixelInfo(image,image->colormap+n);
    gray=(double) (n*(QuantumRange/MagickMax(colors-1,1)));
    image->colormap[n].red=gray;
    image->colormap[n].green=gray;
    image->colormap[n].blue=gray;
    image->colormap[n].alpha=OpaqueAlpha;
    image->colormap[n].alpha_trait=BlendPixelTrait;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well-known and defined
% order.  Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Displace every pixel's colormap index by `displace`, wrapping modulo
    the number of colormap entries.  DirectClass images are converted to a
    palette first.  Returns MagickFalse if any pixel row cannot be read
    or synced (errors are reported in `exception`).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Cast image->colors to ssize_t before the modulo: image->colors is
        unsigned, and the usual arithmetic conversions would otherwise
        promote the signed sum to unsigned, yielding a wrong (and never
        negative) remainder for negative displacements -- the index < 0
        fix-up below could then never fire.
      */
      index=((ssize_t) GetPixelIndex(image,q)+displace) %
        (ssize_t) image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity;

  /*
    qsort() comparator: order colormap entries by decreasing intensity.
    Compute the difference in double precision and return its sign; the
    previous code truncated each intensity to int before subtracting,
    which discarded fractional differences and risked signed-int overflow
    on HDRI builds with large intensity values.
  */
  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_2)-
    GetPixelInfoIntensity((const Image *) NULL,color_1);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  /*
    Sort the colormap of a PseudoClass image by decreasing intensity and
    remap every pixel's index to the entry's new position.  Non-palette
    images are left untouched (MagickTrue is returned).
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes (old index -> new index table).
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries: the alpha channel is reused
    as scratch space to remember each entry's original position so it can
    be recovered after qsort() permutes the colormap.
    NOTE(review): `status` in the shared() clause below is neither set nor
    read by this loop -- confirm the clause is intentional.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing intensity (see IntensityCompare).
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order:
    pixels[original_index] == sorted_index.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    /* Remap each pixel's palette index through the translation table. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
GB_subassign_11.c | //------------------------------------------------------------------------------
// GB_subassign_11: C(I,J)<M,repl> += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 11: C(I,J)<M,repl> += scalar ; using S
// M: present
// Mask_comp: false
// C_replace: true
// accum: present
// A: scalar
// S: constructed
// C, M: not bitmap
#include "GB_unused.h"
#include "GB_subassign_methods.h"
// C(I,J)<M,repl> += scalar, using the extracted pattern S = C(I,J).
// Locals such as S, S_header, taskid, ntasks, nthreads, nzombies, kfirst,
// klast, pS, pS_end, pM, pM_end and task_pending are declared by the
// GB_EMPTY_TASKLIST / GB_GET_* / task-descriptor macros from
// GB_subassign_methods.h -- TODO confirm against that header.
GrB_Info GB_subassign_11
(
    GrB_Matrix C,               // matrix assigned into (modified in place)
    // input:
    const GrB_Index *I,         // row index list (meaning depends on Ikind)
    const int64_t ni,           // length of I
    const int64_t nI,           // number of rows addressed by I
    const int Ikind,            // encoding of I (explicit list or colon form)
    const int64_t Icolon [3],   // begin:inc:end when I is a colon expression
    const GrB_Index *J,         // column index list
    const int64_t nj,           // length of J
    const int64_t nJ,           // number of columns addressed by J
    const int Jkind,            // encoding of J
    const int64_t Jcolon [3],   // begin:inc:end when J is a colon expression
    const GrB_Matrix M,         // mask (required; see header: not bitmap-restricted)
    const bool Mask_struct,     // if true use only the structure of M
    const GrB_BinaryOp accum,   // accumulator (required for this method)
    const void *scalar,         // scalar operand
    const GrB_Type atype,       // type of the scalar
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_CLEAR_STATIC_HEADER (S, &S_header) ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_ACCUM_SCALAR ;
    GB_GET_S ;

    //--------------------------------------------------------------------------
    // Method 11: C(I,J)<M,repl> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in M+S must be examined.  All entries in S
    // are modified:  if M(i,j)=1 then S(i,j) is used to write to the
    // corresponding entry in C.  If M(i,j) is not present, or zero, then the
    // entry in C is cleared (because of C_replace).  If S(i,j) is not present,
    // and M(i,j)=1, then the scalar is inserted into C.  The only case that
    // can be skipped is if neither S nor M is present.  As a result, this
    // method need not traverse all of IxJ.  It can limit its traversal to the
    // pattern of M+S.

    // Method 09 and Method 11 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    // Slice the work: a bitmap M has no sparse pattern to merge against, so
    // the whole IxJ space is partitioned instead of the M+S pattern.
    if (M_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all M+S
        GB_SUBASSIGN_TWO_SLICE (M, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (Sfound && !mij)
                    {
                        // S (i,j) is present but M (i,j) is false
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        // deferred to phase 2 (only counted here)
                        task_pending++ ;
                    }
                    else if (Sfound && mij)
                    {
                        // S (i,j) present and M (i,j) is true
                        GB_C_S_LOOKUP ;
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_withaccum_C_A_1_scalar ;
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;
                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_C_S_LOOKUP ;
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list S (:,j) has entries.  List M (:,j) exhausted.
                while (pS < pS_end)
                {
                    // S (i,j) is present but M (i,j) is not
                    // ----[C A 0] or [X A 0]-----------------------------------
                    // [X A 0]: action: ( X ): still a zombie
                    // [C A 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // Phase 1 only counted the insertions; this phase revisits the same
    // merge and records each (iC,jC,scalar) as a pending tuple.
    GB_PENDING_CUMSUM ;

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;
                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iM = GBI (Mi, pM, Mvlen) ;
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
|
BFEgg_fmt_plug.c | /*
* This file is part of Eggdrop blowfish patch for John The Ripper.
* Copyright (c) 2002 by Sun-Zero <sun-zero at freemail.hu>
* This is a free software distributable under terms of the GNU GPL.
*
* This format has collisions for repeated patterns (eg. "1" vs. "11",
* or "hey" vs. "heyheyheyhey") - you can run it with --keep-guessing
* if you'd like to see alternative plaintexts.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_BFEgg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_BFEgg);
#else
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "common.h"
#include "blowfish.c"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// Tuning on AMD A8 4500M laptop, cygwin64 with OMP(4x) -test=5
// 4 = 44330 (original)
// 16 = 54760
// 24 = 56151
// 32 = 56216
// 64 = 57770
// 96 = 57888
// 128 = 58016 > instant -test=0
// 256 = 58282 // from here on, not enough gain to matter.
// 512 = 58573
// 1024= 59464
// 4096= 59244 > 1s -test=0
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "bfegg"
#define FORMAT_NAME "Eggdrop"
#define ALGORITHM_NAME "Blowfish 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_MIN_LENGTH 1
#define PLAINTEXT_LENGTH 72
#define CIPHERTEXT_LENGTH 13
#define BINARY_SIZE 7
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors as {ciphertext, plaintext} pairs.  The two entries
   sharing ciphertext "+V6ZOx0rVGWT0" ("1" and "11") demonstrate the
   repeated-pattern collision described in the file header. */
static struct fmt_tests tests[] = {
	{"+9F93o1OxwgK1", "123456"},
	{"+C/.8o.Wuph9.", "qwerty"},
	{"+EEHgy/MBLDd0", "walkman"},
	{"+vPBrs07OTXE/", "tesztuser"},
	{"+zIvO/1nDsd9.", "654321"},
	{"+V6ZOx0rVGWT0", "1"},
	{"+V6ZOx0rVGWT0", "11"},
	{"+Obytd.zXYjH/", "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
	{NULL}
};
/* Candidate plaintexts stored by set_key(), hashed in crypt_all(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate hash output; BINARY_SIZE (7) bytes rounded up to 32-bit words. */
static uint32_t (*crypt_out)[(BINARY_SIZE + 1) / sizeof(uint32_t)];

#if defined (_MSC_VER) || defined (__MINGW32__)
// in VC, _atoi64 is a function; rename ours to avoid the clash.
#define _atoi64 JtR_atoi64
#endif

/* Eggdrop's base-64 alphabet and its inverse table (built in init();
   invalid characters map to 0x7F). */
static const char _itoa64[] = "./0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
static char _atoi64[0x100];
static int valid(char *ciphertext, struct fmt_main *self) {
	int i;

	/* An Eggdrop hash is '+' followed by exactly 12 base-64 characters. */
	if (ciphertext[0] != '+')
		return 0;
	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	for (i = 1; i < CIPHERTEXT_LENGTH; i++)
		if (atoi64[ARCH_INDEX(ciphertext[i])] == 0x7F)
			return 0;
	return 1;
}
void init(struct fmt_main *self) {
	int i;

#ifdef _OPENMP
	/* Scale the key buffers by the thread count, then by the tuned
	   OMP_SCALE factor (see the benchmark table above). */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	/* Build the inverse of _itoa64; every other byte stays 0x7F. */
	memset(_atoi64, 0x7F, sizeof(_atoi64));
	for (i = 0; i < 64; i++)
		_atoi64[ARCH_INDEX(_itoa64[i])] = i;
}
static void done(void)
{
	/* Release the buffers allocated in init(), in reverse order. */
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* The base64 is flawed - we just mimic flaws from the original code */
static void *get_binary(char *ciphertext)
{
	/* Decode the first 10 of the 12 base-64 characters after '+' into
	   BINARY_SIZE (7) bytes.  Each 4-char group yields 24 bits stored
	   little-endian; out[3] packs two chars into ONE byte, silently
	   truncating the high bits, and the last two ciphertext characters
	   are never read -- both are the mimicked flaws noted above, so the
	   exact order of operations here must not be "fixed". */
	static union toalign {
		unsigned char c[BINARY_SIZE];
		uint32_t a[1];	/* forces BINARY_ALIGN (4-byte) alignment */
	} a;
	unsigned char *out = a.c;
	uint32_t value;
	char *pos;

	pos = ciphertext + 1;	/* skip the leading '+' */
	/* chars 0..3 -> out[0..2] */
	value = (uint32_t)_atoi64[ARCH_INDEX(pos[0])] |
		((uint32_t)_atoi64[ARCH_INDEX(pos[1])] << 6) |
		((uint32_t)_atoi64[ARCH_INDEX(pos[2])] << 12) |
		((uint32_t)_atoi64[ARCH_INDEX(pos[3])] << 18);
	out[0] = value;
	out[1] = value >> 8;
	out[2] = value >> 16;
	/* chars 4..5 -> out[3], high bits dropped */
	out[3] = _atoi64[ARCH_INDEX(pos[4])] |
		(_atoi64[ARCH_INDEX(pos[5])] << 6);
	pos += 6;
	/* chars 6..9 -> out[4..6] */
	value = (uint32_t)_atoi64[ARCH_INDEX(pos[0])] |
		((uint32_t)_atoi64[ARCH_INDEX(pos[1])] << 6) |
		((uint32_t)_atoi64[ARCH_INDEX(pos[2])] << 12) |
		((uint32_t)_atoi64[ARCH_INDEX(pos[3])] << 18);
	out[4] = value;
	out[5] = value >> 8;
	out[6] = value >> 16;
	return (void *)out;
}
static void set_key(char *key, int index) {
	/* Keep a NUL-terminated copy; hashing is deferred to crypt_all(). */
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}
static char *get_key(int index) {
	/* Return the candidate exactly as set_key() stored it. */
	return saved_key[index];
}
static int cmp_all(void *binary, int count) {
	int i = 0;

	/* Quick screen: compare only the first 32 bits of each computed
	   hash; cmp_one() performs the full BINARY_SIZE comparison.
	   Without OpenMP only index 0 exists (keys-per-crypt is 1). */
#ifdef _OPENMP
	for (; i < count; i++)
#endif
	{
		if (memcmp(binary, crypt_out[i], 4) == 0)
			return 1;
	}
	return 0;
}
static int cmp_one(void *binary, int index)
{
	/* Full BINARY_SIZE-byte comparison for a single candidate. */
	if (memcmp(binary, crypt_out[index], BINARY_SIZE) != 0)
		return 0;
	return 1;
}
static int cmp_exact(char *source, int index)
{
	/* Nothing further to verify: cmp_one() already compared all
	   BINARY_SIZE bytes recoverable from the ciphertext. */
	return 1;
}
/* Hash-table bucket functions: progressively wider masks applied to the
   first 32 bits of the computed hash. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i = 0;

	/* Hash every stored candidate.  Empty keys are skipped and keep the
	   all-zero block left by mem_calloc().  Without OpenMP only index 0
	   is processed (keys-per-crypt is 1 in that configuration). */
#ifdef _OPENMP
#pragma omp parallel for
	for (i = 0; i < count; i++)
#endif
	{
		if (saved_key[i][0] != '\0')
			blowfish_encrypt_pass(saved_key[i],
			                      (char *)crypt_out[i]);
	}
	return count;
}
/* Format descriptor registered with the JtR core (see FMT_REGISTERS_H
   stanza at the top of the file). */
struct fmt_main fmt_BFEgg = {
	{
		/* static format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_MIN_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NULL },
		tests
	}, {
		/* method table */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,	/* saltless format: defaults throughout */
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
geopm_sched.c | /*
* Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include "geopm_sched.h"
#include "geopm_error.h"
#include "config.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Set to 1 by the SIGCHLD handler once the popen()ed child has exited;
   polled (and reset) by geopm_sched_popen(). */
static volatile unsigned g_is_popen_complete = 0;
static struct sigaction g_popen_complete_signal_action;
static void geopm_sched_popen_complete(int signum)
{
    /* SIGCHLD handler installed by geopm_sched_popen(): flag that the
       child process spawned via popen() has terminated. */
    if (signum != SIGCHLD) {
        return;
    }
    g_is_popen_complete = 1;
}
int geopm_sched_popen(const char *cmd, FILE **fid)
{
    /* Run `cmd` through popen() and spin until SIGCHLD reports that the
       child has exited -- presumably so the stream holds the command's
       complete output before the caller reads it (TODO confirm).
       Returns 0 on success, otherwise errno or a GEOPM error code. */
    int err = 0;
    *fid = NULL;

    struct sigaction save_action;
    /* Temporarily install our SIGCHLD handler, saving the old one. */
    g_popen_complete_signal_action.sa_handler = geopm_sched_popen_complete;
    sigemptyset(&g_popen_complete_signal_action.sa_mask);
    g_popen_complete_signal_action.sa_flags = 0;
    err = sigaction(SIGCHLD, &g_popen_complete_signal_action, &save_action);
    if (!err) {
        *fid = popen(cmd, "r");
        /* Busy-wait for the handler; skipped when popen() returned NULL. */
        while (*fid && !g_is_popen_complete) {

        }
        g_is_popen_complete = 0;
        /* Restore the caller's SIGCHLD disposition. */
        sigaction(SIGCHLD, &save_action, NULL);
    }
    if (!err && *fid == NULL) {
        /* NOTE(review): errno may have been clobbered between the failed
           popen() and here; zero falls back to a generic runtime error. */
        err = errno ? errno : GEOPM_ERROR_RUNTIME;
    }
    return err;
}
/* Return the number of CPUs configured on the system (including any
   that are currently offline), as reported by sysconf(). */
int geopm_sched_num_cpu(void)
{
    long num_cpu = sysconf(_SC_NPROCESSORS_CONF);
    return (int)num_cpu;
}
/* Return the logical CPU the calling thread is currently running on. */
int geopm_sched_get_cpu(void)
{
    int cpu_idx = sched_getcpu();
    return cpu_idx;
}
/* One-time initializer guard for the cached process affinity mask. */
static pthread_once_t g_proc_cpuset_once = PTHREAD_ONCE_INIT;
/* Lazily-initialized mask of CPUs this process is allowed to use. */
static cpu_set_t *g_proc_cpuset = NULL;
/* Size in bytes of g_proc_cpuset, as returned by CPU_ALLOC_SIZE(). */
static size_t g_proc_cpuset_size = 0;
/* If /proc/self/status is usable and correct then parse this file to
determine the process affinity. */
#ifdef GEOPM_PROCFS
/* Parse the "Cpus_allowed:" line of a /proc/<pid>/status formatted
 * stream into an array of 32-bit mask words.
 *
 * num_cpu:     number of CPUs on the system; determines how many
 *              32-bit words of the mask are significant.
 * proc_cpuset: output array of num_cpu/32 (rounded up) words, stored
 *              least significant word first.
 * fid:         open stream positioned anywhere before the key line.
 *
 * Returns zero on success, GEOPM_ERROR_LOGIC if the line is malformed,
 * or GEOPM_ERROR_RUNTIME if the key line is missing or truncated. */
int geopm_sched_proc_cpuset_helper(int num_cpu, uint32_t *proc_cpuset, FILE *fid)
{
    const char *key = "Cpus_allowed:";
    const size_t key_len = strlen(key);
    /* Number of comma separated 32-bit hex words expected. */
    const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0);
    int err = 0;
    char *line = NULL;
    size_t line_len = 0;
    int read_idx = 0;
    while ((getline(&line, &line_len, fid)) != -1) {
        if (strncmp(line, key, key_len) == 0) {
            char *line_ptr = line + key_len;
            /* On some systems we have seen the mask padded with zeros
               beyond the number of online CPUs.  Deal with this by
               skipping extra leading 32 bit masks. */
            int num_comma = 0;
            char *comma_ptr = line_ptr;
            while ((comma_ptr = strchr(comma_ptr, ','))) {
                ++comma_ptr;
                ++num_comma;
            }
            if (num_comma > num_read - 1) {
                num_comma -= num_read - 1;
                for (int i = 0; !err && i < num_comma; ++i) {
                    line_ptr = strchr(line_ptr, ',');
                    if (!line_ptr) {
                        err = GEOPM_ERROR_LOGIC;
                    }
                    else {
                        ++line_ptr;
                    }
                }
            }
            /* The words are printed most significant first, so fill the
               output array from the top down. */
            for (read_idx = num_read - 1; !err && read_idx >= 0; --read_idx) {
                int num_match = sscanf(line_ptr, "%x", proc_cpuset + read_idx);
                if (num_match != 1) {
                    err = GEOPM_ERROR_RUNTIME;
                }
                else {
                    line_ptr = strchr(line_ptr, ',');
                    if (read_idx != 0 && line_ptr == NULL) {
                        /* Ran out of words before the mask was full. */
                        err = GEOPM_ERROR_RUNTIME;
                    }
                    else if (line_ptr != NULL) {
                        /* Advance past the comma to the next word.  Only
                           increment when strchr() found a comma: the
                           original code incremented a NULL pointer here
                           on the final word, which is undefined
                           behavior in C. */
                        ++line_ptr;
                    }
                }
            }
        }
    }
    if (line) {
        free(line);
    }
    if (read_idx != -1) {
        /* The key line was never found, or parsing stopped early. */
        err = GEOPM_ERROR_RUNTIME;
    }
    return err;
}
/* pthread_once() initializer for g_proc_cpuset when /proc is usable:
 * parses the process affinity mask out of /proc/self/status.  If
 * anything fails after the cpu_set_t allocation succeeds, the mask is
 * left fully open so later queries err on the permissive side. */
static void geopm_proc_cpuset_once(void)
{
    const char *status_path = "/proc/self/status";
    const int num_cpu = geopm_sched_num_cpu();
    /* Number of 32-bit words needed to hold one bit per CPU. */
    const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0);
    int err = 0;
    uint32_t *proc_cpuset = NULL;
    FILE *fid = NULL;
    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        proc_cpuset = calloc(num_read, sizeof(*proc_cpuset));
        if (proc_cpuset == NULL) {
            err = ENOMEM;
        }
    }
    if (!err) {
        fid = fopen(status_path, "r");
        if (!fid) {
            err = errno ? errno : GEOPM_ERROR_RUNTIME;
        }
    }
    if (!err) {
        err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid);
        fclose(fid);
    }
    if (!err) {
        /* cpu_set_t is managed in units of unsigned long, and may have extra
         * bits at the end with undefined values.  If that happens,
         * g_proc_cpuset_size may be greater than the size of proc_cpuset,
         * resulting in reading past the end of proc_cpuset.  Avoid this by
         * only copying the number of bytes needed to contain the mask.  Zero
         * the destination first, since it may not be fully overwritten.
         *
         * See the CPU_SET(3) man page for more details about cpu_set_t.
         */
        CPU_ZERO_S(g_proc_cpuset_size, g_proc_cpuset);
        memcpy(g_proc_cpuset, proc_cpuset, num_read * sizeof(*proc_cpuset));
    }
    else if (g_proc_cpuset) {
        /* Parsing failed: assume the process may run on every CPU. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    if (proc_cpuset) {
        free(proc_cpuset);
    }
}
/* If /proc/self/status is not available spawn a pthread requesting an
open affinity mask and then have the thread query the affinity mask
enforced by the OS using sched_getaffinity(). */
#else /* GEOPM_PROCFS */
/* Entry point of the helper thread spawned by geopm_proc_cpuset_once():
 * stores the affinity mask the OS actually enforces into the global
 * cpuset and encodes any failure as a non-NULL, pointer-sized error
 * code for pthread_join() to recover. */
static void *geopm_proc_cpuset_pthread(void *arg)
{
    if (sched_getaffinity(0, g_proc_cpuset_size, g_proc_cpuset) == 0) {
        return NULL;
    }
    return (void *)(size_t)(errno ? errno : GEOPM_ERROR_RUNTIME);
}
/* pthread_once() initializer for g_proc_cpuset when /proc is not
 * available: spawn a pthread whose requested affinity is fully open and
 * let it report the mask the OS actually enforces through
 * sched_getaffinity().  On failure the mask is left fully open.
 *
 * NOTE(review): pthread_attr_destroy() is only reached when every prior
 * step succeeded, so the attribute object leaks on the error paths
 * after pthread_attr_init() -- confirm whether that is acceptable. */
static void geopm_proc_cpuset_once(void)
{
    int err = 0;
    int num_cpu = geopm_sched_num_cpu();
    pthread_t tid;
    pthread_attr_t attr;
    g_proc_cpuset = CPU_ALLOC(num_cpu);
    if (g_proc_cpuset == NULL) {
        err = ENOMEM;
    }
    if (!err) {
        g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu);
        /* Request every CPU; the spawned thread records what it got. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
        err = pthread_attr_init(&attr);
    }
    if (!err) {
        err = pthread_attr_setaffinity_np(&attr, g_proc_cpuset_size, g_proc_cpuset);
    }
    if (!err) {
        err = pthread_create(&tid, &attr, geopm_proc_cpuset_pthread, NULL);
    }
    if (!err) {
        void *result = NULL;
        err = pthread_join(tid, &result);
        if (!err && result) {
            /* The thread encodes its error code in the result pointer. */
            err = (int)(size_t)result;
        }
    }
    if (err && err != ENOMEM) {
        /* Could not determine the mask: fall back to fully open. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset);
        }
    }
    if (!err) {
        err = pthread_attr_destroy(&attr);
    }
}
#endif /* GEOPM_PROCFS */
/* Copy the cached process affinity mask into the caller's cpu_set_t,
 * which must have been allocated for at least num_cpu CPUs.  The cache
 * is lazily initialized on first call via pthread_once().  Bits at or
 * above the number of configured CPUs are cleared.  Returns zero on
 * success, GEOPM_ERROR_INVALID if the caller's buffer is smaller than
 * the cached mask, or a pthread_once() error. */
int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset)
{
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t cpuset_size = CPU_ALLOC_SIZE(num_cpu);
    /* The caller's buffer must be at least as large as the cache. */
    if (!err && cpuset_size < g_proc_cpuset_size) {
        err = GEOPM_ERROR_INVALID;
    }
    if (!err) {
        /* Copy up to the smaller of the sizes to avoid buffer overruns. Zero
         * the destination set first, since it may not be fully overwritten
         */
        CPU_ZERO_S(cpuset_size, proc_cpuset);
        memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size);
        /* Mask off CPUs beyond the configured count. */
        for (int i = sched_num_cpu; i < num_cpu; ++i) {
            CPU_CLR_S(i, cpuset_size, proc_cpuset);
        }
    }
    return err;
}
int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp)
{
    /*! @brief Function that returns a cpuset that has bits set for
               all CPUs enabled for the process which are not used by
               OpenMP.  Rather than returning an empty mask, if all
               CPUs allocated for the process are used by OpenMP, then
               the woomp mask will have all bits set. */
    int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once);
    int sched_num_cpu = geopm_sched_num_cpu();
    size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu);
    if (!err && !g_proc_cpuset) {
        err = ENOMEM;
    }
    /* The caller's mask must be at least as large as the cached one. */
    if (!err && req_alloc_size < g_proc_cpuset_size) {
        err = EINVAL;
    }
    if (!err) {
        /* Copy the process CPU mask into the output. */
        CPU_ZERO_S(req_alloc_size, woomp);
        memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size);
        /* Start an OpenMP parallel region and have each thread clear
           its bit from the mask. */
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
#pragma omp critical
{
        /* 'err' is shared; writes are serialized by the critical
           section. */
        int cpu_index = sched_getcpu();
        if (cpu_index != -1 && cpu_index < num_cpu) {
            /* Clear the bit for this OpenMP thread's CPU. */
            CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp);
        }
        else {
            err = errno ? errno : GEOPM_ERROR_LOGIC;
        }
} /* end pragma omp critical */
} /* end pragma omp parallel */
#endif /* _OPENMP */
    }
    if (!err) {
        /* Mask off CPUs beyond the configured count. */
        for (int i = sched_num_cpu; i < num_cpu; ++i) {
            CPU_CLR_S(i, req_alloc_size, woomp);
        }
    }
    /* NOTE(review): the count and the final set below use
       g_proc_cpuset_size rather than req_alloc_size even though woomp
       was sized with req_alloc_size; glibc's CPU_*_S macros bounds
       check, so bits beyond the cached size are silently skipped --
       confirm this is intended. */
    if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) {
        /* If all CPUs are used by the OpenMP gang, then leave the
           mask open and allow the Linux scheduler to choose. */
        for (int i = 0; i < num_cpu; ++i) {
            CPU_SET_S(i, g_proc_cpuset_size, woomp);
        }
    }
    return err;
}
|
core_ztsqrt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define COMPLEX
/***************************************************************************//**
*
* @ingroup core_tsqrt
*
* Computes a QR factorization of a rectangular matrix
* formed by coupling an n-by-n upper triangular tile A1
* on top of an m-by-n tile A2:
*
* | A1 | = Q * R
* | A2 |
*
*******************************************************************************
*
 * @param[in] m
 *          The number of rows of the tile A2. m >= 0.
*
* @param[in] n
* The number of rows of the tile A1.
* The number of columns of the tiles A1 and A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the n-by-n tile A1.
* On exit, the elements on and above the diagonal of the array
* contain the n-by-n upper trapezoidal tile R;
* the elements below the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. LDA1 >= max(1,N).
*
* @param[in,out] A2
* On entry, the m-by-n tile A2.
* On exit, all the elements with the array tau, represent
* the unitary tile Q as a product of elementary reflectors
* (see Further Details).
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-n triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length n.
*
* @param work
* Auxiliary workspace array of length ib*n.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Weak symbol so that an optimized (e.g. vendor) implementation can
// override this reference kernel at link time.
__attribute__((weak))
int plasma_core_ztsqrt(int m, int n, int ib,
                       plasma_complex64_t *A1, int lda1,
                       plasma_complex64_t *A2, int lda2,
                       plasma_complex64_t *T, int ldt,
                       plasma_complex64_t *tau,
                       plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        plasma_coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    // Constant scalars passed by address to the CBLAS calls.
    static plasma_complex64_t zone = 1.0;
    static plasma_complex64_t zzero = 0.0;

    // Factor the coupled [A1; A2] tile panel by panel, ib columns at a
    // time; each panel builds its own triangular factor block in T.
    for (int ii = 0; ii < n; ii += ib) {
        int sb = imin(n-ii, ib);
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H( II*IB+I ) to annihilate
            // A( II*IB+I:M, II*IB+I ).
            LAPACKE_zlarfg_work(m+1, &A1[lda1*(ii+i)+ii+i], &A2[lda2*(ii+i)], 1,
                                &tau[ii+i]);
            if (ii+i+1 < n) {
                // Apply H( II*IB+I ) to A( II*IB+I:M, II*IB+I+1:II*IB+IB )
                // from the left.
                plasma_complex64_t alpha = -conj(tau[ii+i]);
                // work := conj(row of A1 spanning the trailing panel).
                cblas_zcopy(sb-i-1, &A1[lda1*(ii+i+1)+(ii+i)], lda1, work, 1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(sb-i-1, work, 1);
#endif
                // work += A2(:, trailing)^H * v, v being the reflector
                // stored in column ii+i of A2.
                cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)Plasma_ConjTrans,
                            m, sb-i-1,
                            CBLAS_SADDR(zone), &A2[lda2*(ii+i+1)], lda2,
                            &A2[lda2*(ii+i)], 1,
                            CBLAS_SADDR(zone), work, 1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(sb-i-1, work, 1);
#endif
                // Rank-1 update of the A1 row and the A2 trailing panel.
                cblas_zaxpy(sb-i-1, CBLAS_SADDR(alpha), work, 1,
                            &A1[lda1*(ii+i+1)+ii+i], lda1);
#ifdef COMPLEX
                LAPACKE_zlacgv_work(sb-i-1, work, 1);
#endif
                cblas_zgerc(CblasColMajor,
                            m, sb-i-1,
                            CBLAS_SADDR(alpha), &A2[lda2*(ii+i)], 1,
                            work, 1,
                            &A2[lda2*(ii+i+1)], lda2);
            }
            // Calculate T.
            plasma_complex64_t alpha = -tau[ii+i];
            cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)Plasma_ConjTrans,
                        m, i,
                        CBLAS_SADDR(alpha), &A2[lda2*ii], lda2,
                        &A2[lda2*(ii+i)], 1,
                        CBLAS_SADDR(zzero), &T[ldt*(ii+i)], 1);
            cblas_ztrmv(CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i,
                        &T[ldt*ii], ldt,
                        &T[ldt*(ii+i)], 1);
            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the finished panel's block reflector to the columns to
        // its right in both A1 and A2.
        if (n > ii+sb) {
            plasma_core_ztsmqr(PlasmaLeft, Plasma_ConjTrans,
                               sb, n-(ii+sb), m, n-(ii+sb), ib, ib,
                               &A1[lda1*(ii+sb)+ii], lda1,
                               &A2[lda2*(ii+sb)], lda2,
                               &A2[lda2*ii], lda2,
                               &T[ldt*ii], ldt,
                               work, sb);
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP task wrapper for plasma_core_ztsqrt(): registers the tile data
// dependencies and runs the kernel asynchronously inside a task.
void plasma_core_omp_ztsqrt(int m, int n, int ib,
                            plasma_complex64_t *A1, int lda1,
                            plasma_complex64_t *A2, int lda2,
                            plasma_complex64_t *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*n])
    {
        // Skip the kernel if an earlier task in this sequence failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: each thread owns one slab in 'work';
            // the first n entries are used as tau, the rest as the
            // kernel's work array (see the tau/tau+n split below).
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = plasma_core_ztsqrt(m, n, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          T, ldt,
                                          tau,
                                          tau+n);
            if (info != PlasmaSuccess) {
                plasma_error("core_ztsqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
GB_binop__gt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int64)
// A*D function (colscale): GB (_AxD__gt_int64)
// D*A function (rowscale): GB (_DxB__gt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int64)
// C=scalar+B GB (_bind1st__gt_int64)
// C=scalar+B' GB (_bind1st_tran__gt_int64)
// C=A+scalar GB (_bind2nd__gt_int64)
// C=A'+scalar GB (_bind2nd_tran__gt_int64)
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_INT64 || GxB_NO_GT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; cij = (aij > bij).
GrB_Info GB (_Cdense_ewise3_noaccum__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // compiled out entirely when the GT/INT64 operator is disabled
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix.
GrB_Info GB (_Cdense_accumB__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generator emits this body disabled (#if 0) for this operator,
    // so the function is a no-op that reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__gt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generator emits this body disabled (#if 0) for this operator,
    // so the function is a no-op that reports success
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // boolean output values of C, filled in by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // boolean output values of C, filled in by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of patterns.
GrB_Info GB (_AaddB__gt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch slicing workspaces, released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of patterns.
GrB_Info GB (_AemultB_01__gt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for this operator (see the macro above), so
    // only the #else branch below is compiled in.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_03__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__gt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for all entries present in B's bitmap.
GrB_Info GB (_bind1st__gt_int64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for all entries present in A's bitmap.
GrB_Info GB (_bind2nd__gt_int64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in A's bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x > aij) via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__gt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij > y) via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__gt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
xgboost_data.h | #ifndef XGBOOST_DATA_H
#define XGBOOST_DATA_H
/*!
* \file xgboost_data.h
* \brief the input data structure for gradient boosting
* \author Tianqi Chen: tianqi.tchen@gmail.com
*/
#include <vector>
#include <climits>
#include "../utils/xgboost_utils.h"
#include "../utils/xgboost_stream.h"
#include "../utils/xgboost_matrix_csr.h"
namespace xgboost{
namespace booster{
/*! \brief integer type used in boost */
typedef int bst_int;
/*! \brief unsigned integer type used in boost */
typedef unsigned bst_uint;
/*! \brief float type used in boost */
typedef float bst_float;
/*! \brief debug option for booster */
const bool bst_debug = false;
};
};
namespace xgboost{
namespace booster{
/**
 * \brief This is an interface, defining the way to access features,
 *        by column or by row. This interface is used to make the
 *        implementation of a booster independent of how features are stored.
 *
 *        Why template instead of virtual class: for efficiency --
 *        the feature matrix is used by the innermost loops of the algorithm
 *
 * \tparam Derived type of actual implementation
 * \sa FMatrixS: most of time FMatrixS is sufficient, refer to it if you find it confusing
 */
template<typename Derived>
struct FMatrix{
public:
    /*! \brief example iterator over one row */
    struct RowIter{
        /*!
         * \brief move to next position
         * \return whether there is element in next position
         */
        inline bool Next(void);
        /*! \return feature index in current position */
        inline bst_uint findex(void) const;
        /*! \return feature value in current position */
        inline bst_float fvalue(void) const;
    };
    /*! \brief example iterator over one column */
    struct ColIter{
        /*!
         * \brief move to next position
         * \return whether there is element in next position
         */
        inline bool Next(void);
        /*! \return row index of current position */
        inline bst_uint rindex(void) const;
        /*! \return feature value in current position */
        inline bst_float fvalue(void) const;
    };
    /*! \brief backward iterator over column */
    struct ColBackIter : public ColIter {};
public:
    /*!
     * \brief get number of rows
     * \return number of rows
     */
    inline size_t NumRow(void) const;
    /*!
     * \brief get number of columns
     * \return number of columns
     */
    inline size_t NumCol(void) const;
    /*!
     * \brief get row iterator
     * \param ridx row index
     * \return row iterator
     */
    inline RowIter GetRow(size_t ridx) const;
    /*!
     * \brief get number of column groups, this is used together with GetRow( ridx, gid )
     * \return number of column group
     */
    inline unsigned NumColGroup(void) const{
        return 1;
    }
    /*!
     * \brief get row iterator, return iterator of specific column group
     * \param ridx row index
     * \param gid column group id
     * \return row iterator, only iterates over features of specified column group
     */
    inline RowIter GetRow(size_t ridx, unsigned gid) const;
    /*! \return whether column access is enabled */
    inline bool HaveColAccess(void) const;
    /*!
     * \brief get column iterator, the columns must be sorted by feature value
     * \param ridx column index
     * \return column iterator
     */
    inline ColIter GetSortedCol(size_t ridx) const;
    /*!
     * \brief get column backward iterator, starts from biggest fvalue, and iterates back
     * \param ridx column index
     * \return reverse column iterator
     */
    inline ColBackIter GetReverseSortedCol(size_t ridx) const;
};
};
};
namespace xgboost{
namespace booster{
/*!
* \brief feature matrix to store training instance, in sparse CSR format
*/
class FMatrixS : public FMatrix<FMatrixS>{
public:
/*! \brief one entry in a row */
struct REntry{
    /*! \brief feature index */
    bst_uint findex;
    /*! \brief feature value */
    bst_float fvalue;
    /*! \brief default constructor: leaves the fields uninitialized */
    REntry(void){}
    /*! \brief constructor from an index/value pair */
    REntry(bst_uint findex, bst_float fvalue) : findex(findex), fvalue(fvalue){}
    /*! \brief comparator ordering entries by ascending feature value */
    inline static bool cmp_fvalue(const REntry &a, const REntry &b){
        return a.fvalue < b.fvalue;
    }
};
/*! \brief one row of sparse feature matrix */
struct Line{
    /*! \brief pointer to the entries of this row */
    const REntry *data_;
    /*! \brief number of entries in the row */
    bst_uint len;
    /*! \brief get the i-th entry of the row */
    inline const REntry& operator[](unsigned i) const{
        return data_[i];
    }
};
/*! \brief row iterator */
/*! \brief forward iterator over the entries of one row; call Next()
 *         before reading the first entry */
struct RowIter{
    // current and last entry; dptr_/end_ are also used by ColBackIter
    const REntry *dptr_, *end_;
    RowIter(const REntry* dptr, const REntry* end)
        :dptr_(dptr), end_(end){}
    /*! \brief advance one entry; returns false once the row is exhausted */
    inline bool Next(void){
        if (dptr_ == end_) return false;
        ++dptr_;
        return true;
    }
    /*! \return feature index at the current position */
    inline bst_uint findex(void) const{
        return dptr_->findex;
    }
    /*! \return feature value at the current position */
    inline bst_float fvalue(void) const{
        return dptr_->fvalue;
    }
};
/*! \brief column iterator */
struct ColIter : public RowIter{
    ColIter(const REntry* dptr, const REntry* end)
        :RowIter(dptr, end){}
    /*! \return row index of the current position; columns store the row
     *          id in the findex slot of REntry */
    inline bst_uint rindex(void) const{
        return this->findex();
    }
};
/*! \brief reverse column iterator */
struct ColBackIter : public ColIter{
    // dptr starts one past the last entry; end points at the first entry
    ColBackIter(const REntry* dptr, const REntry* end)
        :ColIter(dptr, end){}
    // shadows RowIter::Next
    // walks backward: pre-decrement means the initial one-past-the-end
    // pointer is never dereferenced
    inline bool Next(void){
        if (dptr_ == end_) return false;
        else{
            --dptr_; return true;
        }
    }
};
public:
/*! \brief constructor: starts as an empty matrix */
FMatrixS(void){ this->Clear(); }
/*! \brief get number of rows */
inline size_t NumRow(void) const{
    // row_ptr_ always carries a leading 0 sentinel, hence the -1
    return row_ptr_.size() - 1;
}
/*!
 * \brief get number of nonzero entries
 * \return number of nonzero entries
 */
inline size_t NumEntry(void) const{
    return row_data_.size();
}
/*! \brief clear the storage, restoring the empty-matrix invariant */
inline void Clear(void){
    row_ptr_.clear();
    // re-establish the CSR sentinel so NumRow() == 0 and AddRow() works
    row_ptr_.push_back(0);
    row_data_.clear();
    col_ptr_.clear();
    col_data_.clear();
}
/*! \brief get sparse part of current row */
inline Line operator[](size_t sidx) const{
    Line sp;
    utils::Assert(!bst_debug || sidx < this->NumRow(), "row id exceed bound");
    sp.len = static_cast<bst_uint>(row_ptr_[sidx + 1] - row_ptr_[sidx]);
    sp.data_ = &row_data_[row_ptr_[sidx]];
    return sp;
}
/*!
 * \brief append one instance (row) to the matrix from parallel STL
 *        arrays, keeping only features whose index lies in [fstart, fend)
 * \param findex feature indices of the instance
 * \param fvalue feature values, aligned with findex
 * \param fstart inclusive lower bound on feature index
 * \param fend exclusive upper bound on feature index
 * \return the row id assigned to the newly added row
 */
inline size_t AddRow(const std::vector<bst_uint> &findex,
                     const std::vector<bst_float> &fvalue,
                     unsigned fstart = 0, unsigned fend = UINT_MAX){
    utils::Assert(findex.size() == fvalue.size());
    unsigned num_kept = 0;
    for (size_t i = 0; i < findex.size(); i++){
        const bst_uint fid = findex[i];
        if (fid >= fstart && fid < fend){
            row_data_.push_back(REntry(fid, fvalue[i]));
            ++num_kept;
        }
    }
    // extend the CSR row pointer; the new row ends num_kept entries later
    row_ptr_.push_back(row_ptr_.back() + num_kept);
    return row_ptr_.size() - 2;
}
/*! \brief get row iterator; passes begin-1/end-1 because RowIter::Next
 *  pre-increments before yielding an entry */
inline RowIter GetRow(size_t ridx) const{
utils::Assert(!bst_debug || ridx < this->NumRow(), "row id exceed bound");
return RowIter(&row_data_[row_ptr_[ridx]] - 1, &row_data_[row_ptr_[ridx + 1]] - 1);
}
/*! \brief get row iterator for column group gid; FMatrixS has a single
 *  column group, so only gid == 0 is accepted */
inline RowIter GetRow(size_t ridx, unsigned gid) const{
utils::Assert(gid == 0, "FMatrixS only have 1 column group");
return FMatrixS::GetRow(ridx);
}
public:
/*! \return whether column-major access has been built and is in sync
 *  with the row-major data (see InitData) */
inline bool HaveColAccess(void) const{
const bool has_col_ptr = !col_ptr_.empty();
const bool col_in_sync = (col_data_.size() == row_data_.size());
return has_col_ptr && col_in_sync;
}
/*! \brief get number of columns; requires column access to be built */
inline size_t NumCol(void) const{
utils::Assert(this->HaveColAccess());
return col_ptr_.size() - 1;
}
/*! \brief get forward iterator over column cidx (entries sorted by fvalue
 *  after InitData); begin-1/end-1 convention matches RowIter::Next */
inline ColIter GetSortedCol(size_t cidx) const{
utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound");
return ColIter(&col_data_[col_ptr_[cidx]] - 1, &col_data_[col_ptr_[cidx + 1]] - 1);
}
/*! \brief get reverse iterator over column cidx: starts one past the last
 *  entry and walks down to the first (ColBackIter decrements in Next) */
inline ColBackIter GetReverseSortedCol(size_t cidx) const{
utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound");
return ColBackIter(&col_data_[col_ptr_[cidx + 1]], &col_data_[col_ptr_[cidx]]);
}
/*!
 * \brief build the column-major (CSC) copy from the row-major data so both
 * access patterns are available; call whenever column access is needed
 */
inline void InitData(void){
utils::SparseCSRMBuilder<REntry> builder(col_ptr_, col_data_);
builder.InitBudget(0);
// first pass: count entries per feature to size each column
for (size_t i = 0; i < this->NumRow(); i++){
for (RowIter it = this->GetRow(i); it.Next();){
builder.AddBudget(it.findex());
}
}
builder.InitStorage();
// second pass: scatter entries; in column storage findex holds the row index
for (size_t i = 0; i < this->NumRow(); i++){
for (RowIter it = this->GetRow(i); it.Next();){
builder.PushElem(it.findex(), REntry((bst_uint)i, it.fvalue()));
}
}
// sort each column by feature value; columns are independent, so this
// parallelizes safely
unsigned ncol = static_cast<unsigned>(this->NumCol());
#pragma omp parallel for schedule(static)
for (unsigned i = 0; i < ncol; i++){
std::sort(&col_data_[col_ptr_[i]], &col_data_[col_ptr_[i + 1]], REntry::cmp_fvalue);
}
}
/*!
 * \brief save data to binary stream; writes the row-major data, then a flag,
 * then the column-major data only if it was built
 * note: since we have size_t in ptr,
 * the format is not portable between 64bit and 32bit machines
 * \param fo output stream
 */
inline void SaveBinary(utils::IStream &fo) const{
FMatrixS::SaveBinary(fo, row_ptr_, row_data_);
int col_access = this->HaveColAccess() ? 1 : 0;
fo.Write(&col_access, sizeof(int));
if (col_access != 0){
FMatrixS::SaveBinary(fo, col_ptr_, col_data_);
}
}
/*!
 * \brief load data from binary stream (inverse of SaveBinary); if the stream
 * carries no column data, the CSC copy is rebuilt via InitData
 * note: since we have size_t in ptr,
 * the format is not portable between 64bit and 32bit machines
 * \param fi input stream
 */
inline void LoadBinary(utils::IStream &fi){
FMatrixS::LoadBinary(fi, row_ptr_, row_data_);
int col_access;
fi.Read(&col_access, sizeof(int));
if (col_access != 0){
FMatrixS::LoadBinary(fi, col_ptr_, col_data_);
}else{
this->InitData();
}
}
/*!
 * \brief load from text file; each row is "<count> idx:val idx:val ..."
 * and parsing stops at the first line whose count cannot be read
 * \param fi input file pointer
 */
inline void LoadText(FILE *fi){
this->Clear();
int ninst;
while (fscanf(fi, "%d", &ninst) == 1){
std::vector<booster::bst_uint> findex;
std::vector<booster::bst_float> fvalue;
while (ninst--){
unsigned index; float value;
utils::Assert(fscanf(fi, "%u:%f", &index, &value) == 2, "load Text");
findex.push_back(index); fvalue.push_back(value);
}
this->AddRow(findex, fvalue);
}
// initialize column support as well
this->InitData();
}
private:
/*!
 * \brief save one CSR/CSC array pair to binary stream:
 * row count, then the pointer array, then the entries (if any)
 * \param fo output stream
 * \param ptr pointer array (always holds at least the sentinel 0)
 * \param data entry array
 */
inline static void SaveBinary(utils::IStream &fo,
const std::vector<size_t> &ptr,
const std::vector<REntry> &data){
size_t nrow = ptr.size() - 1;
fo.Write(&nrow, sizeof(size_t));
fo.Write(&ptr[0], ptr.size() * sizeof(size_t));
if (data.size() != 0){
fo.Write(&data[0], data.size() * sizeof(REntry));
}
}
/*!
 * \brief load one CSR/CSC array pair from binary stream (inverse of the
 * static SaveBinary); the entry count is recovered from ptr.back()
 * \param fi input stream
 * \param ptr pointer array, resized to nrow + 1
 * \param data entry array, resized to ptr.back()
 */
inline static void LoadBinary(utils::IStream &fi,
std::vector<size_t> &ptr,
std::vector<REntry> &data){
size_t nrow;
utils::Assert(fi.Read(&nrow, sizeof(size_t)) != 0, "Load FMatrixS");
ptr.resize(nrow + 1);
utils::Assert(fi.Read(&ptr[0], ptr.size() * sizeof(size_t)) != 0, "Load FMatrixS");
data.resize(ptr.back());
if (data.size() != 0){
utils::Assert(fi.Read(&data[0], data.size() * sizeof(REntry)) != 0, "Load FMatrixS");
}
}
public:
/*! \brief CSR row pointers: row i spans row_data_[row_ptr_[i], row_ptr_[i+1]) */
std::vector<size_t> row_ptr_;
/*! \brief CSR payload: (findex, fvalue) entries of all rows, concatenated */
std::vector<REntry> row_data_;
/*! \brief CSC column pointers; empty until InitData() builds column access */
std::vector<size_t> col_ptr_;
/*! \brief CSC payload: per-column entries, findex holds the row index here */
std::vector<REntry> col_data_;
};
};
};
#endif
|
trmv_x_sky_n_lo_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* y := alpha * op(A) * x + beta * y for a square skyline (SKY) matrix.
 * First scales y by beta (parallel, element-independent), then accumulates
 * the product column by column; the accumulation stays serial because
 * different columns can update the same y[r]. */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* skyline kernel is only defined for square matrices */
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
    }
    for(ALPHA_INT c = 0; c < n; ++c)
    {
        const ALPHA_INT col_start = A->pointers[c];
        const ALPHA_INT col_end = A->pointers[c + 1];
        /* FIX: col_eles is loop-invariant — hoisted out of the inner loop
         * (it was recomputed every iteration), and the redundant col_indx
         * counter is folded into the running row index r. The first stored
         * entry of column c lies on row c - col_eles + 1 (skyline layout). */
        const ALPHA_INT col_eles = col_end - col_start;
        ALPHA_INT r = c - col_eles + 1;
        for(ALPHA_INT ai = col_start; ai < col_end; ++ai, ++r)
        {
            ALPHA_Number t;
            alpha_mul(t, alpha, A->values[ai]);
            alpha_madde(y[r], t, x[c]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* public entry point: dispatches to the (optionally OpenMP-parallel)
 * implementation above; same contract as ONAME_omp */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_SKY *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
ten_tusscher_2004_epi_S1_1.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_1.h"
// Fill the cell_model descriptor on request: resting potential and/or the
// number of ODE state variables of this model (flags select which).
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize the state vector sv of one cell. The textbook defaults below are
// kept for reference; the active initialization uses a precomputed
// steady-state vector (17 entries — assumes NEQ == 17).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392}; for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Advance num_steps time steps for every requested cell, in parallel over
// cells. When cells_to_solve is NULL, cell i maps to state block i.
// NOTE(review): assumes cells_to_solve (when non-NULL) holds at least
// num_cells_to_solve entries — confirm with caller.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
// sv_id is written per iteration, hence private; each thread touches only
// its own cell's NEQ-sized slice of sv
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// One explicit time step for a single cell: RHS_cpu returns the UPDATED
// state in rDY (not derivatives — see RHS_cpu), which is copied back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
// Ten Tusscher 2004 epicardial cell model, single time step.
// Despite the name, this does NOT return pure derivatives: gates are advanced
// with exponential (Rush-Larsen-style) updates, the membrane voltage with an
// explicit Euler step, and Cai/CaSR/Nai/Ki are updated in place — rDY_ holds
// the NEW state for all 17 variables (hence the dt parameter).
// sv: current state (17 entries), rDY_: updated state (output),
// stim_current: external stimulus, dt: time step.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Tuned parameter set (S1_1): overrides the textbook conductances above
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Membrane currents and intermediate quantities
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed exponential decay factors for the FCa and G gates
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents: reversal potentials and rectification factors
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations (Cai and CaSR solved via their quadratic buffering equations)
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates with exponential (Rush-Larsen) integration
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G may only decrease while depolarized (> -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (explicit Euler step stored as the new state)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
FourierTransform.h | #pragma once
#include <omp.h>
#include "ScalarField.h"
#include "Grid.h"
#ifdef __USE_FFT__
#include "fftw3.h"
#endif
namespace pfc
{
// selectors used to index the per-field/per-coordinate FFT plans and arrays
enum Field {
E, B, J
};
enum Coordinate {
x, y, z
};
// transform direction: real-to-complex (forward) or complex-to-real (inverse)
enum FourierTransformDirection {
RtoC, CtoR
};
class FourierTransformGrid {
#ifdef __USE_FFT__
Int3 size;
fftw_plan plans[2][3][3]; // RtoC/CtoR, field, coordinate
ScalarField<FP>* realFields[3][3];
ScalarField<complexFP>* complexFields[3][3];
#endif
public:
#ifdef __USE_FFT__
FourierTransformGrid()
{
for (int f = 0; f < 3; f++)
for (int d = 0; d < 3; d++) {
plans[FourierTransformDirection::RtoC][f][d] = 0;
plans[FourierTransformDirection::CtoR][f][d] = 0;
}
}
template<typename GridTypes gridType>
void initialize(Grid<FP, gridType>* gridFP, Grid<complexFP, gridType>* gridCFP)
{
size = gridFP->numCells;
realFields[E][x] = &(gridFP->Ex), realFields[E][y] = &(gridFP->Ey), realFields[E][z] = &(gridFP->Ez);
realFields[B][x] = &(gridFP->Bx), realFields[B][y] = &(gridFP->By), realFields[B][z] = &(gridFP->Bz);
realFields[J][x] = &(gridFP->Jx), realFields[J][y] = &(gridFP->Jy), realFields[J][z] = &(gridFP->Jz);
complexFields[E][x] = &(gridCFP->Ex), complexFields[E][y] = &(gridCFP->Ey), complexFields[E][z] = &(gridCFP->Ez);
complexFields[B][x] = &(gridCFP->Bx), complexFields[B][y] = &(gridCFP->By), complexFields[B][z] = &(gridCFP->Bz);
complexFields[J][x] = &(gridCFP->Jx), complexFields[J][y] = &(gridCFP->Jy), complexFields[J][z] = &(gridCFP->Jz);
createPlans();
}
~FourierTransformGrid() {
destroyPlans();
}
void doDirectFourierTransform(Field field, Coordinate coord)
{
fftw_execute(plans[FourierTransformDirection::RtoC][field][coord]);
}
void doInverseFourierTransform(Field field, Coordinate coord)
{
fftw_execute(plans[FourierTransformDirection::CtoR][field][coord]);
ScalarField<FP>& res = *realFields[field][coord];
#pragma omp parallel for
for (int i = 0; i < size.x; i++)
for (int j = 0; j < size.y; j++)
//#pragma omp simd
for (int k = 0; k < size.z; k++)
res(i, j, k) /= (FP)size.x*size.y*size.z;
}
#else
FourierTransformGrid() {}
template<typename GridTypes gridType>
FourierTransformGrid(Grid<FP, gridType>* gridFP, Grid<complexFP, gridType>* gridCFP) {}
void doDirectFourierTransform(Field field, Coordinate coord) {}
void doInverseFourierTransform(Field field, Coordinate coord) {}
#endif
void doFourierTransform(Field field, Coordinate coord,
FourierTransformDirection direction)
{
switch (direction) {
case RtoC:
doDirectFourierTransform(field, coord);
break;
case CtoR:
doInverseFourierTransform(field, coord);
break;
default:
break;
}
}
private:
#ifdef __USE_FFT__
void createPlans()
{
int Nx = size.x, Ny = size.y, Nz = size.z;
for (int f = 0; f < 3; f++)
for (int d = 0; d < 3; d++) {
ScalarField<FP>& arrD = *(realFields[f][d]);
ScalarField<complexFP>& arrC = *(complexFields[f][d]);
#ifdef __USE_OMP__
fftw_plan_with_nthreads(omp_get_max_threads());
#endif
plans[FourierTransformDirection::RtoC][f][d] = fftw_plan_dft_r2c_3d(Nx, Ny, Nz,
&(arrD(0, 0, 0)), (fftw_complex*)&(arrC(0, 0, 0)), FFTW_ESTIMATE);
#ifdef __USE_OMP__
fftw_plan_with_nthreads(omp_get_max_threads());
#endif
plans[FourierTransformDirection::CtoR][f][d] = fftw_plan_dft_c2r_3d(Nx, Ny, Nz,
(fftw_complex*)&(arrC(0, 0, 0)), &(arrD(0, 0, 0)), FFTW_ESTIMATE);
}
}
void destroyPlans()
{
for (int f = 0; f < 3; f++)
for (int d = 0; d < 3; d++) {
if (plans[FourierTransformDirection::RtoC][f][d] != 0)
fftw_destroy_plan(plans[FourierTransformDirection::RtoC][f][d]);
if (plans[FourierTransformDirection::CtoR][f][d] != 0)
fftw_destroy_plan(plans[FourierTransformDirection::CtoR][f][d]);
}
}
#endif
};
// Stateless one-shot transforms: each call builds a plan, executes it and
// destroys it (no plan caching — use FourierTransformGrid for repeated use).
class FourierTransform {
public:
#ifdef __USE_FFT__
    static void doDirectFourierTransform(ScalarField<FP>& data, ScalarField<complexFP>& result)
    {
        fftw_plan plan = nullptr;
#ifdef __USE_OMP__
        fftw_plan_with_nthreads(omp_get_max_threads());
#endif
        plan = fftw_plan_dft_r2c_3d(data.getSize().x, data.getSize().y, data.getSize().z,
            (FP*)&(data(0, 0, 0)), (fftw_complex*)&(result(0, 0, 0)), FFTW_ESTIMATE);
        fftw_execute(plan);
        fftw_destroy_plan(plan);
    }
    static void doInverseFourierTransform(ScalarField<complexFP>& data, ScalarField<FP>& result)
    {
        fftw_plan plan = nullptr;
#ifdef __USE_OMP__
        fftw_plan_with_nthreads(omp_get_max_threads());
#endif
        // PERF: hoist getSize() out of the loops — the original re-queried it
        // per iteration and per element in the normalization below
        const Int3 sz = result.getSize();
        plan = fftw_plan_dft_c2r_3d(sz.x, sz.y, sz.z,
            (fftw_complex*)&(data(0, 0, 0)), (FP*)&(result(0, 0, 0)), FFTW_ESTIMATE);
        fftw_execute(plan);
        fftw_destroy_plan(plan);
        // FFTW's c2r transform is unnormalized: divide by the number of cells
        const FP norm = (FP)sz.x * sz.y * sz.z;
#pragma omp parallel for
        for (int i = 0; i < sz.x; i++)
            for (int j = 0; j < sz.y; j++)
                for (int k = 0; k < sz.z; k++)
                    result(i, j, k) /= norm;
    }
#else
    static void doDirectFourierTransform(ScalarField<FP>& data, ScalarField<complexFP>& result) {}
    static void doInverseFourierTransform(ScalarField<complexFP>& data, ScalarField<FP>& result) {}
#endif
    // dispatch helper: field1 is the real array, field2 its spectral image
    static void doFourierTransform(ScalarField<FP>& field1, ScalarField<complexFP>& field2,
        FourierTransformDirection direction)
    {
        switch (direction) {
        case RtoC:
            doDirectFourierTransform(field1, field2);
            break;
        case CtoR:
            doInverseFourierTransform(field2, field1);
            break;
        default:
            break;
        }
    }
    // size of the r2c output array: last dimension halved (+1), see FFTW docs
    static Int3 getSizeOfComplex(const Int3& sizeOfFP)
    {
        return Int3(sizeOfFP.x, sizeOfFP.y, sizeOfFP.z / 2 + 1);
    }
};
} |
main.c | #include "libabl.h"
#include "time.h"
#include "stdio.h"
#include "CL/cl.h"
#include "omp.h"
// One simulated pedestrian agent
typedef struct {
float2 pos;          // current position in the world [0, W)^2
double desiredSpeed; // preferred walking speed
float2 velocity;     // current velocity
bool isFinished;     // whether the agent reached its target
int leader;          // id of this agent's group leader
int id;              // unique agent id
float2 target;       // current navigation target
int envId;           // id of the environment cell the agent occupies
}Point;
// Reflection table describing Point's serialized fields (terminated by TYPE_END).
// NOTE(review): envId has no entry here — presumably excluded from
// serialization on purpose; confirm against the libabl consumers.
static const type_info Point_info[] = {
{ TYPE_FLOAT2, offsetof(Point, pos), "pos", true },
{ TYPE_FLOAT, offsetof(Point, desiredSpeed), "desiredSpeed", false },
{ TYPE_FLOAT2, offsetof(Point, velocity), "velocity", false },
{ TYPE_BOOL, offsetof(Point, isFinished), "isFinished", false },
{ TYPE_INT, offsetof(Point, leader), "leader", false },
{ TYPE_INT, offsetof(Point, id), "id", false },
{ TYPE_FLOAT2, offsetof(Point, target), "target", false },
{ TYPE_END, sizeof(Point), NULL }
};
// Range of agent indices belonging to one environment cell
typedef struct {
int mem_start;
int mem_end;
} env;
// Agent storage: live array plus a double buffer for the update step
struct agent_struct {
dyn_array agents_Point;
dyn_array agents_Point_dbuf;
};
struct agent_struct agents;
// Registry binding the agent arrays to their reflection tables
static const agent_info agents_info[] = {
{ Point_info, offsetof(struct agent_struct, agents_Point), "Point" },
{ NULL, 0, NULL }
};
// Simulation parameters (social-force style model)
double PI = 3.14159;
int num_timesteps = 1000;
int num_agents = 65536;
double T = 0.54;
double rho = 0.05;
double r = 2.0;
double lambda = 2.0;
double gamma = 0.35;
double n_prime = 3.0;
double n = 2.0;
double A = 4.5;
int aa = 3;
double bb = 0.1;
double stepTime = 0.25;
double W = 1144.87; // world side length
// Component-wise uniform random vector in [min, max)
float2 random_1(float2 min, float2 max) {
return float2_create(random_float(min.x, max.x), random_float(min.y, max.y));
}
float3 random_2(float3 min, float3 max) {
return float3_create(random_float(min.x, max.x), random_float(min.y, max.y), random_float(min.z, max.z));
}
// Uniform random scalar in [0, max)
double random_3(double max) {
return random_float(0, max);
}
// Uniform random vector in [0, max), component-wise
float2 random_4(float2 max) {
return random_1(float2_fill(0), max);
}
float3 random_5(float3 max) {
return random_2(float3_fill(0), max);
}
// Uniform random integer in [0, max]
int randomInt_1(int max) {
return random_int(0, max);
}
// Clamp pos into the closed interval [min, max].
double clam(double pos, double min, double max) {
    if (pos < min) return min;
    if (pos > max) return max;
    return pos;
}
// Component-wise clamp of a 2D vector into [min, max]
float2 clam_1(float2 pos, float2 min, float2 max) {
return float2_create(((pos.x < min.x) ? min.x : ((pos.x > max.x) ? max.x : pos.x)), ((pos.y < min.y) ? min.y : ((pos.y > max.y) ? max.y : pos.y)));
}
// Component-wise clamp of a 3D vector into [min, max]
float3 clam_2(float3 pos, float3 min, float3 max) {
return float3_create(((pos.x < min.x) ? min.x : ((pos.x > max.x) ? max.x : pos.x)), ((pos.y < min.y) ? min.y : ((pos.y > max.y) ? max.y : pos.y)), ((pos.z < min.z) ? min.z : ((pos.z > max.z) ? max.z : pos.z)));
}
// Clamp into [0, max] (scalar, 2D, 3D)
double clam_3(double pos, double max) {
return clam(pos, 0, max);
}
float2 clam_4(float2 pos, float2 max) {
return clam_1(pos, float2_fill(0), max);
}
float3 clam_5(float3 pos, float3 max) {
return clam_2(pos, float3_fill(0), max);
}
// Wrap pos into [0, max): negative values re-enter from the top,
// values >= max re-enter from the bottom (single wrap only).
double wraparound(double pos, double max) {
    if (pos < 0) return max + pos;
    if (pos >= max) return pos - max;
    return pos;
}
// Component-wise wrap of a 2D vector into [0, max)
float2 wraparound_1(float2 pos, float2 max) {
return float2_create(((pos.x < 0) ? (max.x + pos.x) : ((pos.x >= max.x) ? (pos.x - max.x) : pos.x)), ((pos.y < 0) ? (max.y + pos.y) : ((pos.y >= max.y) ? (pos.y - max.y) : pos.y)));
}
// Component-wise wrap of a 3D vector into [0, max)
float3 wraparound_2(float3 pos, float3 max) {
return float3_create(((pos.x < 0) ? (max.x + pos.x) : ((pos.x >= max.x) ? (pos.x - max.x) : pos.x)), ((pos.y < 0) ? (max.y + pos.y) : ((pos.y >= max.y) ? (pos.y - max.y) : pos.y)), ((pos.z < 0) ? (max.z + pos.z) : ((pos.z >= max.z) ? (pos.z - max.z) : pos.z)));
}
// True when pos lies inside the axis-aligned box [0, max] (inclusive bounds)
bool is_inside(float2 pos, float2 max) {
return ((((pos.x >= 0) && (pos.y >= 0)) && (pos.x <= max.x)) && (pos.y <= max.y));
}
// A swap is needed only when the pair is strictly out of order.
bool isSortNeeded(int p1, int p2) {
    return p1 > p2;
}
// Lexicographic out-of-order test on (x, y): true when p1 should come after p2
bool isSortNeeded_1(float2 p1, float2 p2) {
if ((p1.x > p2.x)) return true;
if ((p1.x == p2.x)) {
if ((p1.y > p2.y)) {
return true;
}
}
return false;
}
// Next-hop query on an implicit path graph 0-1-2-...-(n-1): adjacent nodes
// cost 1, all other pairs cost 32767 ("infinity"). Runs Dijkstra from
// startnode, then walks the predecessor chain back from endnode and returns
// the first node on the shortest path after startnode.
// NOTE: fixed capacities — callers must keep n <= 100 (and n*n <= 10000).
int dijkstra(int n, int startnode, int endnode) {
    int cost[10000];
    int distance[100];
    int pred[100];
    int visited[100];
    const int nn = (int) n;
    // build the path-graph cost matrix
    for (int u = 0; u < nn; ++u) {
        for (int v = 0; v < nn; ++v) {
            cost[u * nn + v] = ((u - v == 1) || (v - u == 1)) ? 1 : 32767;
        }
    }
    // tentative distances seeded from startnode's row
    for (int u = 0; u < nn; ++u) {
        distance[u] = cost[(int) startnode * nn + u];
        pred[u] = startnode;
        visited[u] = 0;
    }
    distance[(int) startnode] = 0;
    visited[(int) startnode] = 1;
    // settle n-2 more nodes, relaxing neighbours of each new one
    int settled = 1;
    int nearest = 0;
    while (settled < nn - 1) {
        int best = 32767;
        for (int u = 0; u < nn; ++u) {
            if (distance[u] < best && !(bool) visited[u]) {
                best = distance[u];
                nearest = u;
            }
        }
        visited[nearest] = 1;
        for (int u = 0; u < nn; ++u) {
            if (!(bool) visited[u] && best + cost[nearest * nn + u] < distance[u]) {
                distance[u] = best + cost[nearest * nn + u];
                pred[u] = nearest;
            }
        }
        settled = settled + 1;
    }
    // walk the predecessor chain back until the hop adjacent to startnode
    int hop = (int) endnode;
    while (pred[hop] != (int) startnode) {
        hop = pred[hop];
    }
    return hop;
}
// Merge the two device copies of one agent after co-execution: group leaders
// (id == leader) take the target computed by device 1; A0 is the merged result.
Point* coexecution_merge_Point(Point* A0, Point* A1) {
if ((A0->id == A0->leader)) A0->target = A1->target;
return A0;
}
int main() {
double wtime = omp_get_wtime();
for (int i = 0, _var5 = num_agents; i < _var5; ++i) {
*DYN_ARRAY_PLACE(&agents.agents_Point, Point) = (Point) {
.pos = random_4(float2_fill(W)),
.desiredSpeed = random_float(0.1, 0.5),
.velocity = float2_create(0, 0),
.id = i,
.leader = (128 * (i / 128)),
.isFinished = false,
.target = float2_create(1, 1),
};
}
cl_int ret;
cl_device_id device_id = NULL;
cl_uint num_of_platforms;
cl_uint num_of_devices=0;
clGetPlatformIDs(0, NULL, &num_of_platforms);
cl_platform_id platform_ids[num_of_platforms];
ret = clGetPlatformIDs( num_of_platforms, platform_ids, NULL );
cl_command_queue command_queues[2];
if (!agents.agents_Point_dbuf.values) {
agents.agents_Point_dbuf = DYN_ARRAY_COPY_FIXED(Point, &agents.agents_Point);
}
size_t _var6_Point = sizeof(Point)*agents.agents_Point.len;
size_t _var6_Point_dbuf = sizeof(Point)*agents.agents_Point.len;
cl_kernel kernel_Point_0s[2];
cl_kernel kernel_Point_1s[2];
cl_kernel st_kernel_Points[2];
cl_kernel mem_kernel_Points[2];
cl_kernel upenv_kernel_Points[2];
cl_mem _var6MemObj_Points[2];
cl_mem _var6MemObjDbuf_Points[2];
cl_mem _var6MemObjLen_Points[2];
cl_mem _var6MemObjEnv_Points[2];
int activeDevice[2]={0,1};
cl_mem _var6MemObjRng_Points[2];
char fileName[] = "kernel.cl";
char *source_str;
size_t source_size;
FILE *fp;
fp = fopen(fileName, "r");
source_str = (char*)malloc(0x100000);
source_size = fread(source_str, 1, 0x100000, fp);
fclose(fp);
for (int deviceIter = 0 ; deviceIter < 2 ; deviceIter++){
int devId = activeDevice[deviceIter];
ret = clGetDeviceIDs( platform_ids[devId], CL_DEVICE_TYPE_ALL, 1, &device_id, &ret );
cl_context_properties contextProperties[] =
{
CL_CONTEXT_PLATFORM,
(cl_context_properties) platform_ids[devId],
0
};
cl_context context = clCreateContextFromType(contextProperties, CL_DEVICE_TYPE_ALL, NULL, NULL, &ret);
cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, 0, &ret);
command_queues[deviceIter] = command_queue;
cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source_str, NULL, &ret);
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
char* kernel_name_prefix = "compute_kernel_";
char number[2];
char kernel_name[50];
cl_mem _var6MemObj_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, _var6_Point, NULL , &ret);
_var6MemObj_Points[deviceIter] = _var6MemObj_Point;
cl_mem _var6MemObjDbuf_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, _var6_Point_dbuf, NULL , &ret);
_var6MemObjDbuf_Points[deviceIter] = _var6MemObjDbuf_Point;
cl_mem _var6MemObjLen_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(int), NULL , &ret);
_var6MemObjLen_Points[deviceIter] = _var6MemObjLen_Point;
cl_mem _var6MemObjEnv_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(env)*13225, NULL , &ret);
_var6MemObjEnv_Points[deviceIter] = _var6MemObjEnv_Point;
cl_uint2 *rngState_Point;
rngState_Point = (cl_uint2 *)calloc(agents.agents_Point.len, sizeof(cl_uint2));
for ( int _var7 = 0 ; _var7 < agents.agents_Point.len ; _var7++ ) {
cl_uint2 _var8 = { (uint32_t)(_var7 * 2), (uint32_t)(_var7 * 2 + 1) };
rngState_Point[_var7] = _var8;
}
cl_mem _var6MemObjRng_Point = clCreateBuffer(context, CL_MEM_READ_WRITE, sizeof(cl_uint2)*agents.agents_Point.len, NULL , &ret);
_var6MemObjRng_Points[deviceIter] = _var6MemObjRng_Point;
sprintf(number,"%d", deviceIter);
sprintf(kernel_name,"%s%s_%s_%s", kernel_name_prefix, number, "Point","0");
cl_kernel kernel_Point_0 = clCreateKernel(program, kernel_name, &ret);
kernel_Point_0s[deviceIter] = kernel_Point_0;
sprintf(number,"%d", deviceIter);
sprintf(kernel_name,"%s%s_%s_%s", kernel_name_prefix, number, "Point","1");
cl_kernel kernel_Point_1 = clCreateKernel(program, kernel_name, &ret);
kernel_Point_1s[deviceIter] = kernel_Point_1;
cl_kernel st_kernel_Point = clCreateKernel(program, "sorting_Point", &ret);
st_kernel_Points[deviceIter] = st_kernel_Point;
cl_kernel mem_kernel_Point = clCreateKernel(program, "mem_update_Point", &ret);
mem_kernel_Points[deviceIter] = mem_kernel_Point;
cl_kernel upenv_kernel_Point = clCreateKernel(program, "update_envId_Point", &ret);
upenv_kernel_Points[deviceIter] = upenv_kernel_Point;
ret = clSetKernelArg(kernel_Point_0, 0, sizeof(cl_mem), &_var6MemObj_Point);
ret |= clSetKernelArg(kernel_Point_0, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point);
ret |= clSetKernelArg(kernel_Point_0, 2, sizeof(cl_mem), &_var6MemObjLen_Point);
ret |= clSetKernelArg(kernel_Point_0, 3, sizeof(cl_mem), &_var6MemObjEnv_Point);
ret |= clSetKernelArg(kernel_Point_0, 4, sizeof(cl_mem), &_var6MemObjRng_Point);
ret = clSetKernelArg(kernel_Point_1, 0, sizeof(cl_mem), &_var6MemObj_Point);
ret |= clSetKernelArg(kernel_Point_1, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point);
ret |= clSetKernelArg(kernel_Point_1, 2, sizeof(cl_mem), &_var6MemObjLen_Point);
ret |= clSetKernelArg(kernel_Point_1, 3, sizeof(cl_mem), &_var6MemObjEnv_Point);
ret |= clSetKernelArg(kernel_Point_1, 4, sizeof(cl_mem), &_var6MemObjRng_Point);
ret = clSetKernelArg(upenv_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point);
ret |= clSetKernelArg(upenv_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjLen_Point);
ret = clSetKernelArg(st_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point);
ret |= clSetKernelArg(st_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point);
ret |= clSetKernelArg(st_kernel_Point, 2, sizeof(cl_mem), &_var6MemObjLen_Point);
ret = clSetKernelArg(mem_kernel_Point, 0, sizeof(cl_mem), &_var6MemObj_Point);
ret |= clSetKernelArg(mem_kernel_Point, 1, sizeof(cl_mem), &_var6MemObjDbuf_Point);
ret |= clSetKernelArg(mem_kernel_Point, 2, sizeof(cl_mem), &_var6MemObjLen_Point);
ret |= clSetKernelArg(mem_kernel_Point, 3, sizeof(cl_mem), &_var6MemObjEnv_Point);
ret = clEnqueueWriteBuffer(command_queue, _var6MemObj_Point, CL_TRUE, 0, _var6_Point, agents.agents_Point.values, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, _var6MemObjDbuf_Point, CL_TRUE, 0, _var6_Point_dbuf, agents.agents_Point_dbuf.values, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, _var6MemObjLen_Point, CL_TRUE, 0, sizeof(int), &agents.agents_Point.len, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, _var6MemObjRng_Point, CL_TRUE, 0, sizeof(cl_uint2)*agents.agents_Point.len, rngState_Point, 0, NULL, NULL);
}
size_t localWorkSize = 128;
Point* _var9buff_Point0 = calloc(sizeof(Point), agents.agents_Point.len);
Point* _var9buff_Point1 = calloc(sizeof(Point), agents.agents_Point.len);
Point* _var9buffMerge_Point = calloc(sizeof(Point), agents.agents_Point.len);
env *_var9Env_Point = calloc(sizeof(env),13225);
size_t globalWorkSize_Point = roundWorkSizeUp(128, agents.agents_Point.len);
ret = clEnqueueNDRangeKernel(command_queues[0], upenv_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
for (int length = 1; length < globalWorkSize_Point; length <<= 1)
for (int inc = length; inc > 0; inc >>= 1) {
int dir = length << 1;
clSetKernelArg(st_kernel_Points[0], 3, sizeof(int), &inc);
clSetKernelArg(st_kernel_Points[0], 4, sizeof(int), &dir);
clEnqueueNDRangeKernel(command_queues[0], st_kernel_Points[0], 1, NULL, &globalWorkSize_Point, NULL, 0, NULL, NULL);
clFinish(command_queues[0]);
}
ret |= clEnqueueNDRangeKernel(command_queues[0], mem_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
clFinish(command_queues[0]);
clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
clEnqueueReadBuffer(command_queues[0], _var6MemObjEnv_Points[0], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queues[1], _var6MemObjEnv_Points[1], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
for (int _var10 = 0; _var10 < num_timesteps; _var10++) {
ret = clEnqueueNDRangeKernel(command_queues[0], kernel_Point_0s[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
ret |= clEnqueueNDRangeKernel(command_queues[1], kernel_Point_0s[1], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
printf("%d %d\n",_var10,ret);
clFinish(command_queues[1]);
clFinish(command_queues[0]);
clEnqueueReadBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point1, 0, NULL, NULL);
clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
#pragma omp for schedule(static, 128)
for (int _var11 = 0; _var11 < agents.agents_Point.len; _var11++) {
Point *_agent = coexecution_merge_Point(&_var9buff_Point0[_var11], &_var9buff_Point1[_var11]);
_var9buffMerge_Point[_var11] = *_agent;
}
clEnqueueWriteBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buffMerge_Point, 0, NULL, NULL);
ret = clEnqueueNDRangeKernel(command_queues[0], upenv_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
for (int length = 1; length < globalWorkSize_Point; length <<= 1)
for (int inc = length; inc > 0; inc >>= 1) {
int dir = length << 1;
clSetKernelArg(st_kernel_Points[0], 3, sizeof(int), &inc);
clSetKernelArg(st_kernel_Points[0], 4, sizeof(int), &dir);
clEnqueueNDRangeKernel(command_queues[0], st_kernel_Points[0], 1, NULL, &globalWorkSize_Point, NULL, 0, NULL, NULL);
clFinish(command_queues[0]);
}
ret |= clEnqueueNDRangeKernel(command_queues[0], mem_kernel_Points[0], 1, NULL, &globalWorkSize_Point, &localWorkSize, 0, NULL, NULL);
clFinish(command_queues[0]);
clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
clEnqueueReadBuffer(command_queues[0], _var6MemObjEnv_Points[0], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queues[1], _var6MemObj_Points[1], CL_TRUE, 0, _var6_Point, _var9buff_Point0, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queues[1], _var6MemObjEnv_Points[1], CL_TRUE, 0, sizeof(env)*13225, _var9Env_Point, 0, NULL, NULL);
}
ret = clEnqueueReadBuffer(command_queues[0], _var6MemObj_Points[0], CL_TRUE, 0, _var6_Point, agents.agents_Point.values, 0, NULL, NULL);
wtime = omp_get_wtime() - wtime;
printf("Time elapsed is %f seconds\n", wtime);
return 0;
}
|
GB_AxB_saxpy3_flopcount.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_flopcount: compute flops for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// On input, A, B, and M (optional) are matrices for C=A*B, C<M>=A*B, or
// C<!M>=A*B. The flop count for each B(:,j) is computed, and returned as a
// cumulative sum. This function is CSR/CSC agnostic, but for simplicity of
// this description, assume A and B are both CSC matrices, so that ncols(A) ==
// nrows(B). For both CSR and CSC, A->vdim == B->vlen holds. A and/or B may
// be hypersparse, in any combination.
// Bflops has size (B->nvec)+1, for both standard and hypersparse B. Let
// n=B->vdim be the column dimension of B (that is, B is m-by-n).
// If B is a standard CSC matrix then Bflops has size n+1 == B->nvec+1, and on
// output, Bflops [j] is the # of flops required to compute C (:, 0:j-1). B->h
// is NULL, and is implicitly the vector 0:(n-1).
// If B is hypersparse, then let Bh = B->h. Its size is B->nvec, and j = Bh
// [kk] is the (kk)th column in the data structure for B. C will also be
// hypersparse, and only C(:,Bh) will be computed (C may have fewer non-empty
// columns than B). On output, Bflops [kk] is the number of needed flops to
// compute C (:, Bh [0:kk-1]).
// In both cases, Bflops [0] = 0, and Bflops [B->nvec] = total number of flops.
// The size of Bflops is B->nvec+1 so that it has the same size as B->p. The
// first entry of B->p and Bflops are both zero. This allows B to be sliced
// either by # of entries in B (by slicing B->p) or by the flop count required
// (by slicing Bflops).
// This algorithm does not look at the values of M, A, or B, just their
// patterns. The flop count of C=A*B, C<M>=A*B, or C<!M>=A*B is computed for a
// saxpy-based method; the work for A'*B for the dot product method is not
// computed.
// The algorithm scans all nonzeros in B. It only scans at most the min and
// max (first and last) row indices in A and M (if M is present). If A and M
// are not hypersparse, the time taken is O(nnz(B)+n). If all matrices are
// hypersparse, the time is O(nnz(B)*log(h)) where h = max # of vectors present
// in A and M. Assuming B is in standard (not hypersparse) form:
/*
[m n] = size (B) ;
Bflops = zeros (1,n+1) ; % (set to zero in the caller)
Mwork = 0 ;
for each column j in B:
if (B (:,j) is empty) continue ;
mjnz = nnz (M (:,j))
if (M is present, not complemented, and M (:,j) is empty) continue ;
Bflops (j) = mjnz if M present and not dense, to scatter M(:,j)
Mwork += mjnz
for each k where B (k,j) is nonzero:
aknz = nnz (A (:,k))
if (aknz == 0) continue ;
% numerical phase will compute: C(:,j)<#M(:,j)> += A(:,k)*B(k,j)
% where #M is no mask, M, or !M. This typically takes aknz flops,
% or with a binary search if nnz(M(:,j)) << nnz(A(:,k)).
Bflops (j) += aknz
end
end
*/
#include "GB_mxm.h"
#include "GB_ek_slice.h"
#include "GB_bracket.h"
#include "GB_AxB_saxpy3.h"
#define GB_FREE_ALL \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (B_ek_slicing, int64_t) ; \
}
// GB_AxB_saxpy3_flopcount: compute the per-vector flop count of C=A*B,
// C<M>=A*B, or C<!M>=A*B for the saxpy3 method.  On output, Bflops holds the
// cumulative sum over the vectors of B (size B->nvec+1), and *Mwork is the
// total work needed to examine the mask.  Only the patterns of M, A, and B
// are examined, never their values.  See the file-header comments above for
// the full specification of Bflops for standard and hypersparse B.
GB_PUBLIC
GrB_Info GB_AxB_saxpy3_flopcount
(
    int64_t *Mwork,             // amount of work to handle the mask M
    int64_t *Bflops,            // size B->nvec+1
    const GrB_Matrix M,         // optional mask matrix
    const bool Mask_comp,       // if true, mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK_OR_NULL (M, "M for flop count A*B", GB0) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT_MATRIX_OK (A, "A for flop count A*B", GB0) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT_MATRIX_OK (B, "B for flop count A*B", GB0) ;
    ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;
    ASSERT (!GB_PENDING (B)) ;
    ASSERT (A->vdim == B->vlen) ;
    ASSERT (Bflops != NULL) ;
    ASSERT (Mwork != NULL) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t bnvec = B->nvec ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    // clear Bflops
    GB_memset (Bflops, 0, (bnvec+1) * sizeof (int64_t), nthreads_max) ;

    //--------------------------------------------------------------------------
    // get the mask, if present: any sparsity structure
    //--------------------------------------------------------------------------

    bool mask_is_M = (M != NULL && !Mask_comp) ;
    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mh = NULL ;
    int64_t mnvec = 0 ;
    int64_t mvlen = 0 ;
    bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
    bool M_is_dense = false ;
    if (M != NULL)
    {
        Mh = M->h ;
        Mp = M->p ;
        mnvec = M->nvec ;
        mvlen = M->vlen ;
        // a bitmap or as-if-full mask needs no per-vector scatter work, so
        // mjnz is not charged to Bflops or Mwork below in that case
        M_is_dense = GB_IS_BITMAP (M) || GB_as_if_full (M) ;
    }

    //--------------------------------------------------------------------------
    // get A and B: any sparsity structure
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t anvec = A->nvec ;
    const int64_t avlen = A->vlen ;
    const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int8_t *restrict Bb = B->b ;
    const int64_t *restrict Bi = B->i ;
    const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;
    const bool B_is_sparse_or_hyper = B_is_hyper || GB_IS_SPARSE (B) ;
    const int64_t bvlen = B->vlen ;
    const bool B_jumbled = B->jumbled ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // Wfirst [t] and Wlast [t] hold task t's partial flop counts for the
    // first and last vectors of its slice; those vectors may be shared with
    // neighboring tasks, so the partial sums are reduced into Bflops after
    // the parallel region, below.
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;

    //--------------------------------------------------------------------------
    // construct the parallel tasks
    //--------------------------------------------------------------------------

    int B_ntasks, B_nthreads ;
    GB_SLICE_MATRIX (B, 64, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Work, 2*B_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst = Work ;
    Wlast = Work + B_ntasks ;

    //--------------------------------------------------------------------------
    // compute flop counts for C=A*B, C<M>=A*B, or C<!M>=A*B
    //--------------------------------------------------------------------------

    int64_t total_Mwork = 0 ;
    int taskid ;
    #pragma omp parallel for num_threads(B_nthreads) schedule(dynamic,1) \
        reduction(+:total_Mwork)
    for (taskid = 0 ; taskid < B_ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_Bslice [taskid] ;
        int64_t klast = klast_Bslice [taskid] ;
        Wfirst [taskid] = 0 ;
        Wlast [taskid] = 0 ;
        int64_t mpleft = 0 ;     // for GB_lookup of the mask M
        int64_t task_Mwork = 0 ;

        //----------------------------------------------------------------------
        // count flops for vectors kfirst to klast of B
        //----------------------------------------------------------------------

        for (int64_t kk = kfirst ; kk <= klast ; kk++)
        {
            // nnz (B (:,j)), for all tasks
            int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ;
            // C(:,j) is empty if the entire vector B(:,j) is empty
            if (bjnz == 0) continue ;

            //------------------------------------------------------------------
            // find the part of B(:,j) to be computed by this task
            //------------------------------------------------------------------

            int64_t pB, pB_end ;
            GB_get_pA (&pB, &pB_end, taskid, kk,
                kfirst, klast, pstart_Bslice, Bp, bvlen) ;
            int64_t my_bjnz = pB_end - pB ;
            int64_t j = GBH (Bh, kk) ;

            //------------------------------------------------------------------
            // see if M(:,j) is present and non-empty
            //------------------------------------------------------------------

            // if M(:,j) is full, bitmap, or dense, do not add mjnz to bjflops
            // or task_MWork.
            int64_t bjflops = (B_is_bitmap) ? my_bjnz : 0 ;
            int64_t mjnz = 0 ;
            if (M != NULL && !M_is_dense)
            {
                int64_t mpright = mnvec - 1 ;
                int64_t pM, pM_end ;
                GB_lookup (M_is_hyper, Mh, Mp, mvlen, &mpleft, mpright, j,
                    &pM, &pM_end) ;
                mjnz = pM_end - pM ;
                // If M not complemented: C(:,j) is empty if M(:,j) is empty.
                if (mjnz == 0 && !Mask_comp) continue ;
                if (mjnz > 0)
                {
                    // M(:,j) not empty
                    if (pB == GBP (Bp, kk, bvlen))
                    {
                        // this task owns the top part of B(:,j), so it can
                        // account for the work to access M(:,j), without the
                        // work being duplicated by other tasks working on
                        // B(:,j)
                        bjflops = mjnz ;
                        // keep track of total work spent examining the mask.
                        // If any B(:,j) is empty, M(:,j) can be ignored.  So
                        // total_Mwork will be <= nnz (M).
                        task_Mwork += mjnz ;
                    }
                }
            }
            // threshold used below to decide between scanning A(:,k) and
            // scanning M(:,j) with a binary search into A(:,k)
            int64_t mjnz_much = 64 * mjnz ;

            //------------------------------------------------------------------
            // trim Ah on right
            //------------------------------------------------------------------

            // Ah [0..A->nvec-1] holds the set of non-empty vectors of A, but
            // only vectors k corresponding to nonzero entries B(k,j) are
            // accessed for this vector B(:,j).  If nnz (B(:,j)) > 2, prune the
            // search space on the right, so the remaining calls to GB_lookup
            // will only need to search Ah [pleft...pright-1].  pright does not
            // change.  pleft is advanced as B(:,j) is traversed, since the
            // indices in B(:,j) are sorted in ascending order.

            int64_t pleft = 0 ;
            int64_t pright = anvec-1 ;
            if (A_is_hyper && B_is_sparse_or_hyper && my_bjnz > 2 && !B_jumbled)
            {
                // trim Ah [0..pright] to remove any entries past last B(:,j)
                int64_t ilast = Bi [pB_end-1] ;
                GB_bracket_right (ilast, Ah, 0, &pright) ;
            }

            //------------------------------------------------------------------
            // count the flops to compute C(:,j)<#M(:,j)> = A*B(:,j)
            //------------------------------------------------------------------

            // where #M is either not present, M, or !M
            for ( ; pB < pB_end ; pB++)
            {
                // get B(k,j)
                int64_t k = GBI (Bi, pB, bvlen) ;
                if (!GBB (Bb, pB)) continue ;

                // B(k,j) is nonzero

                // find A(:,k), reusing pleft if B is not jumbled
                if (B_jumbled)
                {
                    pleft = 0 ;
                }
                int64_t pA, pA_end ;
                GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft, pright, k,
                    &pA, &pA_end) ;

                // skip if A(:,k) empty
                const int64_t aknz = pA_end - pA ;
                if (aknz == 0) continue ;

                double bkjflops ;

                // skip if intersection of A(:,k) and M(:,j) is empty
                // and mask is not complemented (C<M>=A*B)
                if (mask_is_M)
                {
                    // A(:,k) is non-empty; get first and last index of A(:,k)
                    if (aknz > 256 && mjnz_much < aknz && mjnz < mvlen &&
                        aknz < avlen && !(A->jumbled))
                    {
                        // scan M(:j), and do binary search for A(i,j)
                        bkjflops = mjnz * (1 + 4 * log2 ((double) aknz)) ;
                    }
                    else
                    {
                        // scan A(:k), and lookup M(i,j)
                        bkjflops = aknz ;
                    }
                }
                else
                {
                    // A(:,k)*B(k,j) requires aknz flops
                    bkjflops = aknz ;
                }

                // increment by flops for the single entry B(k,j)
                // C(:,j)<#M(:,j)> += A(:,k)*B(k,j).
                bjflops += bkjflops ;
            }

            //------------------------------------------------------------------
            // log the flops for B(:,j)
            //------------------------------------------------------------------

            // partial counts for the first/last vector of the slice go into
            // Wfirst/Wlast; interior vectors are owned by exactly one task
            // and can be written to Bflops directly
            if (kk == kfirst)
            {
                Wfirst [taskid] = bjflops ;
            }
            else if (kk == klast)
            {
                Wlast [taskid] = bjflops ;
            }
            else
            {
                Bflops [kk] = bjflops ;
            }
        }

        // compute the total work to access the mask, which is <= nnz (M)
        total_Mwork += task_Mwork ;
    }

    //--------------------------------------------------------------------------
    // reduce the first and last vector of each slice
    //--------------------------------------------------------------------------

    // See also Template/GB_select_phase1.c

    int64_t kprior = -1 ;

    for (int taskid = 0 ; taskid < B_ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // sum up the partial flops that taskid computed for kfirst
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_Bslice [taskid] ;
        int64_t klast = klast_Bslice [taskid] ;

        if (kfirst <= klast)
        {
            int64_t pB = pstart_Bslice [taskid] ;
            int64_t pB_end = GBP (Bp, kfirst+1, bvlen) ;
            pB_end = GB_IMIN (pB_end, pstart_Bslice [taskid+1]) ;
            if (pB < pB_end)
            {
                if (kprior < kfirst)
                {
                    // This task is the first one that did work on
                    // B(:,kfirst), so use it to start the reduction.
                    Bflops [kfirst] = Wfirst [taskid] ;
                }
                else
                {
                    // subsequent task for B(:,kfirst)
                    Bflops [kfirst] += Wfirst [taskid] ;
                }
                kprior = kfirst ;
            }
        }

        //----------------------------------------------------------------------
        // sum up the partial flops that taskid computed for klast
        //----------------------------------------------------------------------

        if (kfirst < klast)
        {
            int64_t pB = GBP (Bp, klast, bvlen) ;
            int64_t pB_end = pstart_Bslice [taskid+1] ;
            if (pB < pB_end)
            {
                /* if */ ASSERT (kprior < klast) ;
                {
                    // This task is the first one that did work on
                    // B(:,klast), so use it to start the reduction.
                    Bflops [klast] = Wlast [taskid] ;
                }
                /*
                else
                {
                    // If kfirst < klast and B(:,klast) is not empty,
                    // then this task is always the first one to do
                    // work on B(:,klast), so this case is never used.
                    ASSERT (GB_DEAD_CODE) ;
                    // subsequent task to work on B(:,klast)
                    Bflops [klast] += Wlast [taskid] ;
                }
                */
                kprior = klast ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of Bflops
    //--------------------------------------------------------------------------

    // Bflops = cumsum ([0 Bflops]) ;
    ASSERT (Bflops [bnvec] == 0) ;
    GB_cumsum (Bflops, bnvec, NULL, B_nthreads, Context) ;
    // Bflops [bnvec] is now the total flop count, including the time to
    // compute A*B and to handle the mask.  total_Mwork is part of this total
    // flop count, but is also returned separately.

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    (*Mwork) = total_Mwork ;
    return (GrB_SUCCESS) ;
}
|
valid.mob4.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_28_28_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_128_28_28_128_3_3.h"
// Generated convolution test driver (push-button code generator output):
// first packs the filter tensor oriB into the 16-interleaved buffer B using
// 8x8 AVX transposes, then runs a 15-level tiled loop nest that applies the
// 6x2-vector / 4x2-vector scatter micro-kernels to accumulate C += A * B.
// NOTE(review): uNf, uNc, uNw, uNh and the tile sizes Tf2, Tc1, Txy3 are
// macros from the included generated headers -- confirm their values there.
void testrun(float* A ,float*B, float*C, float*oriB ){
  int tid = omp_get_thread_num();
  // fixed problem geometry: 28x28 spatial extent, 3x3 filter
  int Nx = 28;
  int Ny = 28;
  int Nh = 3;
  // per-output-element input strides handed to the scatter micro-kernel;
  // they are patched in place below when a 6-wide tile crosses a row boundary
  long long Astrides[6] = {0,2,4,6,8,10};
  int b1 = 0;
  // packing phase: each thread transposes its share of oriB into B
  for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
    for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
      transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
      transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
    }
  }
  // all threads must finish packing B before the compute phase reads it
  #pragma omp barrier// begin push button generated block
  for(int c5=0;c5<128+0;c5+=128)
  {
    for(int xy5=0;xy5<784+0;xy5+=784)
    {
      for(int f5=0;f5<128+0;f5+=128)
      {
        for(int c4=c5;c4<min(128, 128+c5);c4+=128)
        {
          for(int xy4=xy5;xy4<min(784, 784+xy5);xy4+=784)
          {
            for(int f4=f5;f4<min(128, 128+f5);f4+=Tf2)
            {
              for(int c3=c4;c3<min(128, 128+c4);c3+=Tc1)
              {
                for(int f3=f4;f3<min(128, Tf2+f4);f3+=Tf2)
                {
                  for(int xy3=xy4;xy3<min(784, 784+xy4);xy3+=Txy3)
                  {
                    for(int xy2=xy3;xy2<min(784, Txy3+xy3);xy2+=6)
                    {
                      for(int f2=f3;f2<min(128, Tf2+f3);f2+=16)
                      {
                        for(int c2=c3;c2<min(128, Tc1+c3);c2+=Tc1)
                        {
                          for(int c1=c2;c1<min(128, Tc1+c2);c1+=Tc1)
                          {
                            for(int xy1=xy2;xy1<min(784, 6+xy2);xy1+=6)
                            {
                              for(int f1=f2;f1<min(128, 16+f2);f1+=16)
                              {
                                int ctile=min(Tc1, 128-c1);
                                // decode the linear xy / f indices into
                                // spatial coordinates and packing offsets
                                int x1=xy1/28;
                                int y1=xy1%28/1;
                                int c1_1=c1/1;
                                int c1_2=c1%1/1;
                                int kf1_1=f1/16;
                                int kf1_2=f1%16/1;
                                int of1_1=f1/1;
                                int of1_2=f1%1/1;
                                int offsetA=0+b1*430592+c1_1*3364+2*x1*58+2*y1*1+c1_2*1;
                                int offsetB=0+kf1_1*18432+c1*144+0*48+0*16+kf1_2*1;
                                int offsetC=0+b1*100352+of1_1*784+x1*28+y1*1+of1_2*1;
                                // case 1: 6-wide tile fits within the row
                                if(28-y1>=6){
                                  cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                }
                                // case 2: tile wraps into the next row --
                                // temporarily bump the wrapped strides by 60
                                else if(28*28-xy1>=6){
                                  for(int sti=28-y1;sti<6;sti+=1)
                                  {
                                    Astrides[sti]+=60;
                                  }
                                  cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                  for(int sti=28-y1;sti<6;sti+=1)
                                  {
                                    Astrides[sti]-=60;
                                  }
                                }
                                // case 3: fewer than 6 outputs remain in the
                                // image -- use the narrower 4x2v kernel
                                else{
                                  cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  // end push button generated block
}
pdgstrf.c |
/*! @file
* \brief Performs LU factorization in parallel
*
* <pre>
* -- Distributed SuperLU routine (version 4.3) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 1, 1999
* February 7, 2001 use MPI_Isend/MPI_Irecv
* October 15, 2008 latency-reducing panel factorization
* July 12, 2011 static scheduling and arbitrary look-ahead
* March 13, 2013 change NTAGS to MPI_TAG_UB value
* September 24, 2015 replace xLAMCH by xMACH, using C99 standard.
* December 31, 2015 rename xMACH to xMACH_DIST
*
* Sketch of the algorithm
*
* =======================
*
* The following relations hold:
* * A_kk = L_kk * U_kk
* * L_ik = Aik * U_kk^(-1)
* * U_kj = L_kk^(-1) * A_kj
*
* ----------------------------------
* | | |
* ----|-----------------------------
* | | \ U_kk| |
* | | \ | U_kj |
* | |L_kk \ | || |
* ----|-------|---------||----------
* | | | \/ |
* | | | |
* | | | |
* | | | |
* | | L_ik ==> A_ij |
* | | | |
* | | | |
* | | | |
* ----------------------------------
*
* Handle the first block of columns separately.
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity. ( pdgstrf2(0), one column at a time )
* * Compute block row of U
* * Update trailing matrix
*
* Loop over the remaining blocks of columns.
* mycol = MYCOL( iam, grid );
* myrow = MYROW( iam, grid );
* N = nsupers;
* For (k = 1; k < N; ++k) {
* krow = PROW( k, grid );
* kcol = PCOL( k, grid );
* Pkk = PNUM( krow, kcol, grid );
*
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity.
* if ( mycol == kcol ) {
* pdgstrf2(k), one column at a time
* }
*
* * Parallel triangular solve
* if ( iam == Pkk ) multicast L_k,k to this process row;
* if ( myrow == krow && mycol != kcol ) {
* Recv L_k,k from process Pkk;
* for (j = k+1; j < N; ++j)
* if ( PCOL( j, grid ) == mycol && A_k,j != 0 )
* U_k,j = L_k,k \ A_k,j;
* }
*
* * Parallel rank-k update
* if ( myrow == krow ) multicast U_k,k+1:N to this process column;
* if ( mycol == kcol ) multicast L_k+1:N,k to this process row;
* if ( myrow != krow ) {
* Pkj = PNUM( krow, mycol, grid );
* Recv U_k,k+1:N from process Pkj;
* }
* if ( mycol != kcol ) {
* Pik = PNUM( myrow, kcol, grid );
* Recv L_k+1:N,k from process Pik;
* }
* for (j = k+1; k < N; ++k) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L_i,k != 0 && U_k,j != 0 )
* A_i,j = A_i,j - L_i,k * U_k,j;
* }
* }
*
* </pre>
*/
#include <math.h>
/*#include "mkl.h"*/
#include "superlu_ddefs.h"
#ifdef GPU_ACC
#include "cublas_utils.h"
/*#include "cublas_dgemm.h"*/
// #define NUM_CUDA_STREAMS 16
// #define NUM_CUDA_STREAMS 16
#endif
/* Various definitions */
/*
Name : SUPERNODE_PROFILE
Purpose : For SuperNode Level profiling of various measurements such as gigaflop/sec
obtained, bandwidth achieved:
Overhead : Low
*/
// #define SUPERNODE_PROFILE
/*
Name : BASELINE
Purpose : baseline to compare performance against
Overhead : NA : this won't be used for running experiments
*/
// #define BASELINE
/*
Name : PHI_FRAMEWORK
Purpose : To simulate and test algorithm used for offloading Phi
Overhead : NA : this won't be used for running experiments
*/
#define PHI_FRAMEWORK
#define PDGSTRF2 pdgstrf2_trsm
#define PDGSTRS2 pdgstrs2_omp
extern void PDGSTRF2 (superlu_options_t *, int_t, int_t, double,
Glu_persist_t *, gridinfo_t *, LocalLU_t *,
MPI_Request *, int, SuperLUStat_t *, int *);
#ifdef _CRAY
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *, _fcd, _fcd, _fcd);
#else
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *);
#endif
#define ISORT /* Note: qsort() has bug on Mac */
#ifdef ISORT
extern void isort (int_t N, int_t * ARRAY1, int_t * ARRAY2);
extern void isort1 (int_t N, int_t * ARRAY);
#else
/* Three-way comparator for qsort(): orders int_t keys ascending.
 *
 * Bug fix: the previous version returned only (*val2 < *val1), i.e. 0 or 1.
 * A qsort comparator must return a negative value when the first argument
 * sorts before the second, zero when equal, and positive otherwise; reporting
 * "less than" and "equal" identically gives an inconsistent ordering and can
 * leave the array incorrectly sorted.  The difference of the two comparisons
 * below yields -1, 0, or +1 and, unlike "*val1 - *val2", cannot overflow. */
int
superlu_sort_perm (const void *arg1, const void *arg2)
{
    const int_t *val1 = (const int_t *) arg1;
    const int_t *val2 = (const int_t *) arg2;
    return (*val1 > *val2) - (*val1 < *val2);
}
#endif
/* Number of threads per MPI process, read from the THREAD_PER_PROCESS
 * environment variable; defaults to 1 when the variable is unset. */
int get_thread_per_process()
{
    const char *setting = getenv("THREAD_PER_PROCESS");
    return (setting != NULL) ? atoi(setting) : 1;
}
/* Whether MIC offload is enabled, read from the SUPERLU_MIC_OFFLOAD
 * environment variable; defaults to 0 (disabled) when unset. */
int
get_mic_offload ()
{
    const char *setting = getenv ("SUPERLU_MIC_OFFLOAD");
    return (setting == NULL) ? 0 : atoi (setting);
}
/* Maximum send/receive buffer size, read from the MAX_BUFFER_SIZE
 * environment variable; defaults to 5000000 when unset. */
int_t
get_max_buffer_size ()
{
    const char *setting = getenv ("MAX_BUFFER_SIZE");
    return (setting == NULL) ? 5000000 : atoi (setting);
}
/* cuBLAS blocking factor, read from the CUBLAS_NB environment variable;
 * defaults to 64 when unset. */
int_t
get_cublas_nb ()
{
    const char *setting = getenv ("CUBLAS_NB");
    return (setting == NULL) ? 64 : atoi (setting);
}
/* Number of CUDA streams to use, read from the NUM_CUDA_STREAMS
 * environment variable; defaults to 8 when unset. */
int_t
get_num_cuda_streams ()
{
    const char *setting = getenv ("NUM_CUDA_STREAMS");
    return (setting == NULL) ? 8 : atoi (setting);
}
/*int omp_get_num_threads (void);
int omp_get_thread_num (void);*/
/* Pick which of the two MIC devices a rank offloads to, alternating by
 * rank: even ranks map to device 1, odd ranks to device 0. */
int AssignMic(int my_rank)
{
    int next_rank = my_rank + 1;
    return next_rank % 2;
}
/************************************************************************/
#include "dscatter.c"
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
*
* PDGSTRF performs the LU factorization in parallel.
*
* Arguments
* =========
*
* options (input) superlu_options_t*
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
* The following field should be defined:
* o ReplaceTinyPivot (yes_no_t)
* Specifies whether to replace the tiny diagonals by
* sqrt(epsilon)*norm(A) during LU factorization.
*
* m (input) int
* Number of rows in the matrix.
*
* n (input) int
* Number of columns in the matrix.
*
* anorm (input) double
* The norm of the original matrix A, or the scaled A if
* equilibration was done.
*
* LUstruct (input/output) LUstruct_t*
* The data structures to store the distributed L and U factors.
* The following fields should be defined:
*
* o Glu_persist (input) Glu_persist_t*
* Global data structure (xsup, supno) replicated on all processes,
* describing the supernode partition in the factored matrices
* L and U:
* xsup[s] is the leading column of the s-th supernode,
* supno[i] is the supernode number to which column i belongs.
*
* o Llu (input/output) LocalLU_t*
* The distributed data structures to store L and U factors.
* See superlu_ddefs.h for the definition of 'LocalLU_t'.
*
* grid (input) gridinfo_t*
* The 2D process mesh. It contains the MPI communicator, the number
* of process rows (NPROW), the number of process columns (NPCOL),
* and my process rank. It is an input argument to all the
* parallel routines.
* Grid can be initialized by subroutine SUPERLU_GRIDINIT.
* See superlu_ddefs.h for the definition of 'gridinfo_t'.
*
* stat (output) SuperLUStat_t*
* Record the statistics on runtime and floating-point operation count.
* See util.h for the definition of 'SuperLUStat_t'.
*
* info (output) int*
* = 0: successful exit
* < 0: if info = -i, the i-th argument had an illegal value
* > 0: if info = i, U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
* </pre>
*/
int_t
pdgstrf(superlu_options_t * options, int m, int n, double anorm,
LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat, int *info)
{
#ifdef _CRAY
_fcd ftcs = _cptofcd ("N", strlen ("N"));
_fcd ftcs1 = _cptofcd ("L", strlen ("L"));
_fcd ftcs2 = _cptofcd ("N", strlen ("N"));
_fcd ftcs3 = _cptofcd ("U", strlen ("U"));
#endif
double zero = 0.0, alpha = 1.0, beta = 0.0;
int_t *xsup;
int_t *lsub, *lsub1, *usub, *Usub_buf;
int_t **Lsub_buf_2, **Usub_buf_2;
double **Lval_buf_2, **Uval_buf_2; /* pointers to starts of bufs */
double *lusup, *lusup1, *uval, *Uval_buf; /* pointer to current buf */
int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc,
lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj,
nlb, nub, nsupc, rel, rukp, il, iu;
int_t Pc, Pr;
int iam, kcol, krow, yourcol, mycol, myrow, pi, pj;
int j, k, lk, nsupers; /* k - current panel to work on */
int k0; /* counter of the next supernode to be factored */
int kk, kk0, kk1, kk2, jj0; /* panels in the look-ahead window */
int iukp0, rukp0, flag0, flag1;
int nsupr, nbrow, segsize;
int msg0, msg2;
int_t **Ufstnz_br_ptr, **Lrowind_bc_ptr;
double **Unzval_br_ptr, **Lnzval_bc_ptr;
int_t *index;
double *nzval;
int_t *iuip, *ruip; /* Pointers to U index/nzval; size ceil(NSUPERS/Pr). */
double *ucol;
int *indirect, *indirect2;
double *tempv, *tempv2d;
int iinfo;
int *ToRecv, *ToSendD, **ToSendR;
Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
LocalLU_t *Llu = LUstruct->Llu;
superlu_scope_t *scp;
float s_eps;
double thresh;
double *tempU2d, *tempu;
int full, ldt, ldu, lead_zero, ncols, ncb, nrb, p, pr, pc, nblocks;
int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows,
*Lblock, *Lrows, *perm_u, *sf_block, *sf_block_l, *nnodes_l,
*nnodes_u, *edag_supno_l, *recvbuf, **edag_supno;
float edag_supno_l_bytes;
#ifdef ISORT
int_t *iperm_u;
#endif
int *msgcnt; /* Count the size of the message xfer'd in each buffer:
* 0 : transferred in Lsub_buf[]
* 1 : transferred in Lval_buf[]
* 2 : transferred in Usub_buf[]
* 3 : transferred in Uval_buf[]
*/
int **msgcnts, **msgcntsU;
int *factored, *factoredU, nnodes, *sendcnts, *sdispls, *recvcnts,
*rdispls, *srows, *rrows;
etree_node *head, *tail, *ptr;
int *num_child;
int num_look_aheads, look_id, *look_ahead;
int_t *perm_c_supno, *iperm_c_supno;
MPI_Request *recv_req, **recv_reqs, **send_reqs, **send_reqs_u,
**recv_reqs_u;
MPI_Request *send_req, *U_diag_blk_send_req = NULL;
MPI_Status status;
void *attr_val;
int flag;
int iword = sizeof (int_t);
int dword = sizeof (double);
double scatter_timer = 0;
double gemm_timer = 0;
/* For measuring load imbalance in omp threads */
double omp_load_imblc = 0.0;
double *omp_loop_time;
double CPUOffloadTimer = 0;
double CPUOffloadFlop = 0;
double CPUOffloadMop = 0;
double schur_flop_timer = 0.0;
double pdgstrf2_timer = 0.0;
double pdgstrs2_timer = 0.0;
double lookaheadupdatetimer = 0.0;
#if !defined( GPU_ACC )
/* Counter for couting memory operations */
double scatter_mem_op_counter = 0.0;
double scatter_mem_op_timer = 0.0;
double scatterL_mem_op_counter = 0.0;
double scatterL_mem_op_timer = 0.0;
double scatterU_mem_op_counter = 0.0;
double scatterU_mem_op_timer = 0.0;
double LookAheadRowSepTimer = 0.0;
double LookAheadRowSepMOP = 0.0;
double GatherTimer = 0.0;
double GatherMOP = 0.0;
double LookAheadGEMMTimer = 0.0;
double LookAheadGEMMFlOp = 0.0;
double LookAheadScatterTimer = 0.0;
double LookAheadScatterMOP = 0.0;
double schur_flop_counter = 0.0;
#endif
#if ( DEBUGlevel>=2 )
int_t num_copy = 0, num_update = 0;
#endif
#if ( PRNTlevel==3 )
int zero_msg = 0, total_msg = 0;
#endif
#if ( PROFlevel>=1 )
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
#endif
/* Test the input parameters. */
*info = 0;
if (m < 0)
*info = -2;
else if (n < 0)
*info = -3;
if (*info) {
pxerr_dist ("pdgstrf", grid, -*info);
return (-1);
}
/* Quick return if possible. */
if (m == 0 || n == 0) return 0;
/*
* Initialization.
*/
iam = grid->iam;
Pc = grid->npcol;
Pr = grid->nprow;
myrow = MYROW (iam, grid);
mycol = MYCOL (iam, grid);
nsupers = Glu_persist->supno[n - 1] + 1;
xsup = Glu_persist->xsup;
s_eps = smach_dist("Epsilon");
thresh = s_eps * anorm;
MPI_Attr_get (MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &flag);
if (!flag) {
fprintf (stderr, "Could not get TAG_UB\n");
return (-1);
}
int tag_ub = *(int *) attr_val;
#if ( PRNTlevel>=1 )
if (!iam)
printf ("MPI tag upper bound = %d\n", tag_ub);
#endif
#if ( DEBUGlevel>=1 )
if (s_eps == 0.0)
printf (" ***** warning s_eps = %e *****\n", s_eps);
CHECK_MALLOC (iam, "Enter pdgstrf()");
#endif
stat->ops[FACT] = 0.0;
stat->current_buffer = 0.0;
stat->peak_buffer = 0.0;
stat->gpu_buffer = 0.0;
/* make sure the range of look-ahead window [0, MAX_LOOKAHEADS-1] */
num_look_aheads = SUPERLU_MAX(0, SUPERLU_MIN(options->num_lookaheads, MAX_LOOKAHEADS - 1));
if (Pr * Pc > 1) {
if (!(U_diag_blk_send_req =
(MPI_Request *) SUPERLU_MALLOC (Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for U_diag_blk_send_req[].");
/* flag no outstanding Isend */
U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL; /* used 0 before */
/* allocating buffers for look-ahead */
i = Llu->bufmax[0];
if (i != 0) {
if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * ((size_t) i))) )
ABORT ("Malloc fails for Lsub_buf.");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lsub_buf_2[jj + 1] = Llu->Lsub_buf_2[jj] + i;
}
i = Llu->bufmax[1];
if (i != 0) {
if (!(Llu->Lval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * ((size_t) i))))
ABORT ("Malloc fails for Lval_buf[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lval_buf_2[jj + 1] = Llu->Lval_buf_2[jj] + i;
}
i = Llu->bufmax[2];
if (i != 0) {
if (!(Llu->Usub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Usub_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Usub_buf_2[jj + 1] = Llu->Usub_buf_2[jj] + i;
}
i = Llu->bufmax[3];
if (i != 0) {
if (!(Llu->Uval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Uval_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Uval_buf_2[jj + 1] = Llu->Uval_buf_2[jj] + i;
}
}
log_memory( (Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1)
* iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1)
* dword, stat );
/* creating pointers to the look-ahead buffers */
if (! (Lsub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
ABORT ("Malloc fails for Lsub_buf_2[].");
if (! (Lval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
ABORT ("Malloc fails for Lval_buf_2[].");
if (! (Usub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
ABORT ("Malloc fails for Uval_buf_2[].");
if (! (Uval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
ABORT ("Malloc fails for buf_2[].");
for (i = 0; i <= num_look_aheads; i++) {
Lval_buf_2[i] = Llu->Lval_buf_2[i];
Lsub_buf_2[i] = Llu->Lsub_buf_2[i];
Uval_buf_2[i] = Llu->Uval_buf_2[i];
Usub_buf_2[i] = Llu->Usub_buf_2[i];
}
if (!(msgcnts = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcntsU[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(msgcnts[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcntsU[].");
}
if (! (recv_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs_u[].");
if (! (send_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for send_reqs_u[].");
if (! (send_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for send_reqs_u[].");
if (! (recv_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(recv_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req_u[i].");
if (!(send_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_req_u[i].");
if (!(send_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pc * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_reqs[i].");
if (!(recv_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (4 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req[].");
send_reqs[i][0] = send_reqs[i][1] = MPI_REQUEST_NULL;
recv_reqs[i][0] = recv_reqs[i][1] = MPI_REQUEST_NULL;
}
if (!(factored = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factored[].");
if (!(factoredU = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factoredU[].");
for (i = 0; i < nsupers; i++) factored[i] = factoredU[i] = -1;
log_memory(2 * nsupers * iword, stat);
int num_threads = 1;
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
if (omp_get_thread_num () == 0) {
num_threads = omp_get_num_threads ();
}
}
#endif
#if 0
omp_loop_time = (double *) _mm_malloc (sizeof (double) * num_threads,64);
#else
omp_loop_time = (double *) doubleMalloc_dist(num_threads);
#endif
#if ( PRNTlevel>=1 )
if(!iam) printf(".. Starting with %d OpenMP threads \n", num_threads );
double tt1 = SuperLU_timer_ ();
#endif
nblocks = 0;
ncb = nsupers / Pc;
nrb = nsupers / Pr;
int nstreams = get_num_cuda_streams ();
/* int nstreams = NUM_CUDA_STREAMS; */
/* in order to have dynamic scheduling */
int *full_u_cols;
int *blk_ldu;
#if 0
full_u_cols = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
blk_ldu = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
#else
full_u_cols = SUPERLU_MALLOC(ncb * sizeof(int));
blk_ldu = SUPERLU_MALLOC(ncb * sizeof(int));
#endif
log_memory(2 * ncb * iword, stat);
/* array holding last column blk for each partition,
used in SchCompUdt--CUDA.c */
#if 0
int *stream_end_col = (int_t *) _mm_malloc (sizeof (int_t) * nstreams,64);
#else
int *stream_end_col = SUPERLU_MALLOC( nstreams * sizeof(int) );
#endif
/* insert a check condition here */
#if 0 /* Sherry: not used? */
/* This bunch is used for static scheduling */
pair *full_col_count = (pair *) _mm_malloc (sizeof (pair) * ncb,64);
int_t *count_cols, *sum_cols, *partition;
count_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
sum_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
partition = (int_t *) _mm_malloc (sizeof (int_t) * num_threads * ncb,64);
int_t ldp = ncb;
#endif
/* ##################################################################
* Compute a good static schedule based on the factorization task graph.
* ################################################################## */
perm_c_supno = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
iperm_c_supno = perm_c_supno + nsupers;
static_schedule(options, m, n, LUstruct, grid, stat,
perm_c_supno, iperm_c_supno, info);
#if ( DEBUGlevel >= 2 )
PrintInt10("schedule:perm_c_supno", nsupers, perm_c_supno);
printf("[%d] .. Turn off static schedule for debugging ..\n", iam);
/* Turn off static schedule */
for (i = 0; i < nsupers; ++i) perm_c_supno[i] = iperm_c_supno[i] = i;
#endif
/* ################################################################## */
/* constructing look-ahead table to indicate the last dependency */
int *look_ahead_l;
stat->num_look_aheads = num_look_aheads;
look_ahead_l = SUPERLU_MALLOC (nsupers * sizeof (int));
look_ahead = SUPERLU_MALLOC (nsupers * sizeof (int));
for (lb = 0; lb < nsupers; lb++) look_ahead_l[lb] = -1;
log_memory(3 * nsupers * iword, stat);
/* go through U-factor */
for (lb = 0; lb < nrb; ++lb) {
ib = lb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[lb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (myrow < nsupers % grid->nprow) {
ib = nrb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[nrb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (options->SymPattern == NO) {
/* go through L-factor */
for (lb = 0; lb < ncb; lb++) {
ib = lb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[lb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
if (mycol < nsupers % grid->npcol) {
ib = ncb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[ncb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
}
MPI_Allreduce (look_ahead_l, look_ahead, nsupers, MPI_INT, MPI_MAX, grid->comm);
SUPERLU_FREE (look_ahead_l);
#ifdef ISORT
iperm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
perm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
#else
perm_u = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
#endif
log_memory(nsupers * iword, stat);
#if ( PRNTlevel>=1 )
if (grid->iam == 0)
printf (" * init: %e seconds\n", SuperLU_timer_ () - tt1);
#endif
k = sp_ienv_dist (3); /* max supernode size */
#if 0
if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) )
ABORT("Malloc fails for ujrow[].");
#else
/* Instead of half storage, we'll do full storage */
if (!(Llu->ujrow = doubleMalloc_dist (k * k)))
ABORT ("Malloc fails for ujrow[].");
log_memory(k * k * iword, stat);
#endif
#if ( PRNTlevel>=1 )
if (!iam) {
printf (".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm,
thresh);
printf
(".. Buffer size: Lsub %ld\tLval %ld\tUsub %ld\tUval %ld\tLDA %ld\n",
(long int) Llu->bufmax[0], (long int) Llu->bufmax[1],
(long int) Llu->bufmax[2], (long int) Llu->bufmax[3],
(long int) Llu->bufmax[4]);
}
#endif
Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
Unzval_br_ptr = Llu->Unzval_br_ptr;
ToRecv = Llu->ToRecv;
ToSendD = Llu->ToSendD;
ToSendR = Llu->ToSendR;
ldt = sp_ienv_dist (3); /* Size of maximum supernode */
k = CEILING (nsupers, Pr); /* Number of local block rows */
/* Following circuit is for finding maximum block size */
int local_max_row_size = 0;
int max_row_size;
for (int i = 0; i < nsupers; ++i) {
int tpc = PCOL (i, grid);
if (mycol == tpc) {
lk = LBj (i, grid);
lsub = Lrowind_bc_ptr[lk];
if (lsub != NULL) {
local_max_row_size = SUPERLU_MAX (local_max_row_size, lsub[1]);
}
}
}
/* Max row size is global reduction of within A row */
MPI_Allreduce (&local_max_row_size, &max_row_size, 1, MPI_INT, MPI_MAX, (grid->rscp.comm));
/* Buffer size is the max over the look-ahead window */
/* int_t buffer_size =
SUPERLU_MAX (max_row_size * num_threads * ldt,
get_max_buffer_size ()); */
int cublas_nb = get_cublas_nb();
#ifdef GPU_ACC
int buffer_size = SUPERLU_MAX(max_row_size*nstreams*cublas_nb,get_max_buffer_size());
#else
int Threads_per_process = get_thread_per_process();
int buffer_size = SUPERLU_MAX(max_row_size*Threads_per_process*ldt,get_max_buffer_size());
#endif
/* symmetric assumption */
/* Note that in following expression 8 can be anything
as long as its not too big */
int bigu_size = 8 * sp_ienv_dist (3) * (max_row_size);
#if ( PRNTlevel>=1 )
if(!iam) printf("[%d] .. BIG U size %d \n", iam, bigu_size);
#endif
#ifdef GPU_ACC
// printf("hello 1\n");
double* bigU;
if ( checkCuda(cudaHostAlloc((void**)&bigU, bigu_size * sizeof(double), cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer U ");
int bigv_size = buffer_size;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double* bigV;
if ( checkCuda(cudaHostAlloc((void**)&bigV, bigv_size * sizeof(double) ,cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer V");
DisplayHeader();
#if ( PRNTlevel>=1 )
printf(" Starting with %d Cuda Streams \n",nstreams );
#endif
cublasHandle_t *handle;
handle = (cublasHandle_t *) SUPERLU_MALLOC(sizeof(cublasHandle_t)*nstreams);
for(int i = 0; i < nstreams; i++) handle[i] = create_handle();
// creating streams
cudaStream_t *streams;
streams = (cudaStream_t *) SUPERLU_MALLOC(sizeof(cudaStream_t)*nstreams);
for (int i = 0; i < nstreams; ++i)
checkCuda( cudaStreamCreate(&streams[i]) );
// allocating data in device
double *dA, *dB, *dC;
cudaError_t cudaStat;
#if 0
// cudaStat = cudaMalloc( (void**)&dA, m*k*sizeof(double));
// HOw much should be the size of dA?
// for time being just making it
// cudaStat = cudaMalloc( (void**)&dA, ((max_row_size*sp_ienv_dist(3)))* sizeof(double));
#endif
cudaStat = cudaMalloc( (void**)&dA, max_row_size*sp_ienv_dist(3)* sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating A in the device %ld \n",m*k*sizeof(double) );
return 1;
}
// size of B should be max_supernode_size*buffer
cudaStat = cudaMalloc((void**)&dB, bigu_size * sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating B in the device %ld \n",n*k*sizeof(double));
return 1;
}
cudaStat = cudaMalloc((void**)&dC, buffer_size* sizeof(double) );
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating C in the device \n" );
return 1;
}
stat->gpu_buffer += ( max_row_size * sp_ienv_dist(3)
+ bigu_size + buffer_size ) * dword;
#else /* not CUDA */
double* bigU;
if ( !(bigU = doubleMalloc_dist(bigu_size)) )
ABORT ("Malloc fails for dgemm u buff U");
//Maximum size of bigU = sqrt(buffsize) ?
int bigv_size = 8 * ldt * ldt * num_threads;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double *bigV;
if ( !(bigV = doubleMalloc_dist(bigv_size)) )
ABORT ("Malloc failed for dgemm buffer V");
#endif
log_memory((bigv_size + bigu_size) * dword, stat);
// mlock(bigU,(bigu_size) * sizeof (double));
#if ( PRNTlevel>=1 )
if(!iam) {
printf (" Max row size is %d \n", max_row_size);
printf (" Using buffer_size of %d \n", buffer_size);
printf (" Threads per process %d \n", num_threads);
}
#endif
if (!(tempv2d = doubleCalloc_dist (2 * ((size_t) ldt) * ldt)))
ABORT ("Calloc fails for tempv2d[].");
tempU2d = tempv2d + ldt * ldt;
if (!(indirect = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(indirect2 = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(iuip = intMalloc_dist (k))) ABORT ("Malloc fails for iuip[].");
if (!(ruip = intMalloc_dist (k))) ABORT ("Malloc fails for ruip[].");
log_memory(2 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword, stat);
int_t *lookAheadFullRow,*lookAheadStRow,*lookAhead_lptr,*lookAhead_ib,
*RemainFullRow,*RemainStRow,*Remain_lptr,*Remain_ib;
lookAheadFullRow = intMalloc_dist( (num_look_aheads+1) );
lookAheadStRow = intMalloc_dist( (num_look_aheads+1) );
lookAhead_lptr = intMalloc_dist( (num_look_aheads+1) );
lookAhead_ib = intMalloc_dist( (num_look_aheads+1) );
int_t mrb= (nsupers+Pr-1) / Pr;
int_t mcb= (nsupers+Pc-1) / Pc;
RemainFullRow = intMalloc_dist(mrb);
RemainStRow = intMalloc_dist(mrb);
#if 0
Remain_lptr = (int *) _mm_malloc(sizeof(int)*mrb,1);
#else
Remain_lptr = intMalloc_dist(mrb);
#endif
// mlock(Remain_lptr, sizeof(int)*mrb );
Remain_ib = intMalloc_dist(mrb);
Remain_info_t *Remain_info;
#if 0
Remain_info = (Remain_info_t *) _mm_malloc(mrb*sizeof(Remain_info_t),64);
#else
Remain_info = (Remain_info_t *) SUPERLU_MALLOC(mrb*sizeof(Remain_info_t));
#endif
log_memory(4 * mrb * iword + mrb * sizeof(Remain_info_t), stat);
double *lookAhead_L_buff, *Remain_L_buff;
Ublock_info_t *Ublock_info;
ldt = sp_ienv_dist (3); /* max supernode size */
lookAhead_L_buff = doubleMalloc_dist(ldt*ldt* (num_look_aheads+1) );
log_memory(ldt * ldt * (num_look_aheads+1) * dword, stat);
#if 0
Remain_L_buff = (double *) _mm_malloc( sizeof(double)*(Llu->bufmax[1]),64);
Ublock_info = (Ublock_info_t *) _mm_malloc(mcb*sizeof(Ublock_info_t),64);
int * Ublock_info_iukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_rukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_jb = (int *) _mm_malloc(mcb*sizeof(int),64);
#else
Remain_L_buff = doubleMalloc_dist(Llu->bufmax[1]);
Ublock_info = (Ublock_info_t *) SUPERLU_MALLOC(mcb*sizeof(Ublock_info_t));
int *Ublock_info_iukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_rukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_jb = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
#endif
log_memory(Llu->bufmax[1] * dword, stat);
double NetSchurUpTimer = 0;
double pdgstrfTimer= SuperLU_timer_();
/* ##################################################################
** Handle first block column separately to start the pipeline. **
################################################################## */
look_id = 0;
msgcnt = msgcnts[0];
send_req = send_reqs[0];
recv_req = recv_reqs[0];
k0 = 0;
k = perm_c_supno[0];
kcol = PCOL (k, grid);
krow = PROW (k, grid);
if (mycol == kcol) {
double ttt1 = SuperLU_timer_();
/* panel factorization */
PDGSTRF2 (options, k0, k, thresh, Glu_persist, grid, Llu,
U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_()-ttt1;
scp = &grid->rscp; /* The scope of process row. */
/* Multicasts numeric values of L(:,0) to process rows. */
lk = LBj (k, grid); /* Local block number. */
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
if (lsub) {
msgcnt[0] = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub[1] * SuperSize (k);
} else {
msgcnt[0] = msgcnt[1] = 0;
}
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (lsub, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &send_req[pj]);
MPI_Isend (lusup, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
printf ("[%d] first block cloumn Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n",
iam, 0, msgcnt[0], msgcnt[1], pj);
#endif
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[0] * iword + msgcnt[1] * dword;
#endif
} /* end if */
} /* end for pj ... */
} else { /* Post immediate receives. */
if (ToRecv[k] >= 1) { /* Recv block column L(:,0). */
scp = &grid->rscp; /* The scope of process row. */
MPI_Irecv (Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, kcol,
SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[0], Llu->bufmax[1], MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &recv_req[1]);
}
} /* end if mycol == 0 */
factored[k] = 0; /* flag column k as factored. */
/* post receive of first U-row */
if (myrow != krow) {
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[0];
Uval_buf = Llu->Uval_buf_2[0];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, 0) /* 2%tag_ub */ ,
scp->comm, &recv_reqs_u[0][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, 0) /* 3%tag_ub */ ,
scp->comm, &recv_reqs_u[0][1]);
}
}
/* ##################################################################
**** MAIN LOOP ****
################################################################## */
for (k0 = 0; k0 < nsupers; ++k0) {
k = perm_c_supno[k0];
/* ============================================ *
* ======== look-ahead the new columns ======== *
* ============================================ */
/* tt1 = SuperLU_timer_(); */
if (k0 == 0) { /* look-ahead all the columns in the window */
kk1 = k0 + 1;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
} else { /* look-ahead one new column after the current window */
kk1 = k0 + num_look_aheads;
kk2 = SUPERLU_MIN (kk1, nsupers - 1);
}
for (kk0 = kk1; kk0 <= kk2; kk0++) {
/* loop through look-ahead window */
kk = perm_c_supno[kk0]; /* use the ordering from static schedule */
look_id = kk0 % (1 + num_look_aheads); /* which column in window */
if (look_ahead[kk] < k0) { /* does not depend on current column */
kcol = PCOL (kk, grid);
if (mycol == kcol) {
/* Panel factorization -- Factor diagonal and subdiagonal
L blocks and test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh, Glu_persist,
grid, Llu, U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Multicasts numeric values of L(:,kk) to process rows. */
/* ttt1 = SuperLU_timer_(); */
msgcnt = msgcnts[look_id]; /* point to the proper count array */
send_req = send_reqs[look_id];
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
lusup1 = Lnzval_bc_ptr[lk];
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
printf ("[%d] -1- Send L(:,%4d): #lsub1 %4d, #lusup1 %4d right to Pj %2d\n",
iam, kk, msgcnt[0], msgcnt[1], pj);
#endif
}
}
/* stat->time9 += SuperLU_timer_() - ttt1; */
} else { /* Post Recv of block column L(:,kk). */
/* double ttt1 = SuperLU_timer_(); */
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
/* stat->time10 += SuperLU_timer_() - ttt1; */
} /* end if mycol == Pc(kk) */
} /* end if look-ahead */
/* post irecv for U-row look-ahead */
krow = PROW (kk, grid);
if (myrow != krow) {
if (ToRecv[kk] == 2) { /* post iRecv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, kk0) /* (4*kk0+2)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, kk0) /* (4*kk0+3)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][1]);
}
}
} /* end for each column in look-ahead window */
/* stat->time4 += SuperLU_timer_()-tt1; */
/* ================================= *
* == looking-ahead the U columns == *
* ================================= */
kk1 = k0;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = kk1; kk0 < kk2; kk0++) {
kk = perm_c_supno[kk0];
if (factoredU[kk0] != 1 && look_ahead[kk] < k0) {
kcol = PCOL (kk, grid);
krow = PROW (kk, grid);
lk = LBj (kk, grid); /* Local block number. */
look_id = kk0 % (1 + num_look_aheads);
msgcnt = msgcntsU[look_id];
recv_req = recv_reqs[look_id];
/* ================================================= *
* checking if diagonal block has been received *
* for panel factorization of U in look-ahead window *
* ================================================= */
if (mycol == kcol) {
flag0 = flag1 = 1;
msgcnt[0] = msgcnt[1] = -1;
} else {
flag0 = flag1 = 0;
if (ToRecv[kk] >= 1) {
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[0], &flag0, &status);
if (flag0) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
}
} else flag0 = 1;
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[1], &flag1, &status);
if (flag1) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
}
} else flag1 = 1;
} else msgcnt[0] = 0;
}
if (flag0 && flag1) {
/* tt1 = SuperLU_timer_(); */
scp = &grid->cscp; /* The scope of process column. */
if (myrow == krow) {
factoredU[kk0] = 1;
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
/* double ttt2 = SuperLU_timer_(); */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (kk0, kk, Glu_persist, grid, Llu,
stat);
}
pdgstrs2_timer += SuperLU_timer_()-ttt2;
/* stat->time8 += SuperLU_timer_()-ttt2; */
/* Multicasts U(k,:) to process columns. */
lk = LBi (kk, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, kk0), /* (4*kk0+2)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi]);
MPI_Isend (uval, msgcnt[3], MPI_DOUBLE,
pi, SLU_MPI_TAG (3, kk0), /* (4*kk0+3)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi + Pr]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) to Pr %2d\n",
iam, k, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
/* stat->time2 += SuperLU_timer_()-tt1; */
} /* end if myrow == krow */
} /* end if flag0 ... */
} /* end if factoredU[] ... */
} /* end for kk0 ... */
/* ============================================== *
* == start processing the current row of U == *
* ============================================== */
knsupc = SuperSize (k);
krow = PROW (k, grid);
kcol = PCOL (k, grid);
/* tt1 = SuperLU_timer_(); */
look_id = k0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
if (mycol == kcol) {
lk = LBj (k, grid); /* Local block number. */
for (pj = 0; pj < Pc; ++pj) {
/* Wait for Isend to complete before using lsub/lusup. */
if (ToSendR[lk][pj] != EMPTY) {
MPI_Wait (&send_req[pj], &status);
MPI_Wait (&send_req[pj + Pc], &status);
}
}
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
} else {
if (ToRecv[k] >= 1) { /* Recv block column L(:,k). */
scp = &grid->rscp; /* The scope of process row. */
/* ============================================ *
* waiting for L(:,kk) for outer-product update *
* if iam in U(kk,:) then *
* the diagonal block did not reach in time *
* for panel factorization of U(k,:) *
* ============================================ */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
} else {
msgcnt[0] = msgcntsU[look_id][0];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[0] == MPI_REQUEST_NULL, msgcnt[0] = %d\n",
iam, k, look_id, msgcnt[0]);
#endif
}
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
} else {
msgcnt[1] = msgcntsU[look_id][1];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[1] == MPI_REQUEST_NULL, msgcnt[1] = %d\n",
iam, k, look_id, msgcnt[1]);
#endif
}
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
#if ( DEBUGlevel>=2 )
printf("[%d] Recv L(:,%4d): #lsub %4d, #lusup %4d from Pc %2d\n",
iam, k, msgcnt[0], msgcnt[1], kcol);
fflush (stdout);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[0]) ++zero_msg;
#endif
} else {
msgcnt[0] = 0;
}
lsub = Lsub_buf_2[look_id];
lusup = Lval_buf_2[look_id];
} /* if mycol = Pc(k) */
/* stat->time1 += SuperLU_timer_()-tt1; */
scp = &grid->cscp; /* The scope of process column. */
/* tt1 = SuperLU_timer_(); */
if (myrow == krow) {
lk = LBi (k, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (factoredU[k0] == -1) {
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (k0, k, Glu_persist, grid, Llu, stat);
}
pdgstrs2_timer += SuperLU_timer_() - ttt2;
/* Multicasts U(k,:) along process columns. */
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Send (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, k0), /* (4*k0+2)%tag_ub */
scp->comm);
MPI_Send (uval, msgcnt[3], MPI_DOUBLE, pi,
SLU_MPI_TAG (3, k0), /* (4*k0+3)%tag_ub */
scp->comm);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) down to Pr %2d\n", iam, k, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
} else {
/* =========================================== *
* waiting for U(k,:) for outer-product update *
* =========================================== */
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
MPI_Wait (&send_reqs_u[look_id][pi], &status);
MPI_Wait (&send_reqs_u[look_id][pi + Pr], &status);
}
}
}
msgcnt[2] = msgcntsU[look_id][2];
msgcnt[3] = msgcntsU[look_id][3];
}
/* stat->time2 += SuperLU_timer_()-tt1; */
} else { /* myrow != krow */
/* ========================================= *
* wait for U(k,:) for outer-product updates *
* ========================================= */
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Wait (&recv_reqs_u[look_id][0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[2]);
MPI_Wait (&recv_reqs_u[look_id][1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[3]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
usub = Usub_buf;
uval = Uval_buf;
#if ( DEBUGlevel>=2 )
printf ("[%d] Recv U(%4d,:) from Pr %2d\n", iam, k, krow);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[2]) ++zero_msg;
#endif
} else {
msgcnt[2] = 0;
}
/* stat->time6 += SuperLU_timer_()-tt1; */
} /* if myrow == Pr(k) */
/*
* Parallel rank-k update; pair up blocks L(i,k) and U(k,j).
* for (j = k+1; k < N; ++k) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L(i,k) != 0 && U(k,j) != 0 )
* A(i,j) = A(i,j) - L(i,k) * U(k,j);
*/
msg0 = msgcnt[0];
msg2 = msgcnt[2];
/* tt1 = SuperLU_timer_(); */
if (msg0 && msg2) { /* L(:,k) and U(k,:) are not empty. */
nsupr = lsub[1]; /* LDA of lusup. */
if (myrow == krow) { /* Skip diagonal block L(k,k). */
lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER + 1];
luptr0 = knsupc;
nlb = lsub[0] - 1;
} else {
lptr0 = BC_HEADER;
luptr0 = 0;
nlb = lsub[0];
}
iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */
rukp = 0; /* Pointer to nzval[] of U(k,:) */
nub = usub[0]; /* Number of blocks in the block row U(k,:) */
klst = FstBlockC (k + 1);
/* -------------------------------------------------------------
Update the look-ahead block columns A(:,k+1:k+num_look_ahead)
------------------------------------------------------------- */
iukp0 = iukp;
rukp0 = rukp;
/* reorder the remaining columns in bottom-up order */
/* TAU_STATIC_TIMER_START("LOOK_AHEAD_UPDATE"); */
for (jj = 0; jj < nub; jj++) {
#ifdef ISORT
iperm_u[jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[jj] = jj;
#else
perm_u[2 * jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[2 * jj + 1] = jj;
#endif
jb = usub[iukp]; /* Global block number of block U(k,j). */
nsupc = SuperSize (jb);
iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */
iukp += nsupc;
}
iukp = iukp0;
#ifdef ISORT
isort (nub, iperm_u, perm_u);
#else
qsort (perm_u, (size_t) nub, 2 * sizeof (int_t),
&superlu_sort_perm);
#endif
j = jj0 = 0;
/************************************************************************/
double ttx =SuperLU_timer_();
#include "dlook_ahead_update.c"
lookaheadupdatetimer += SuperLU_timer_() - ttx;
/************************************************************************/
/*ifdef OMP_LOOK_AHEAD */
/* TAU_STATIC_TIMER_STOP("LOOK_AHEAD_UPDATE"); */
} /* if L(:,k) and U(k,:) not empty */
/* stat->time3 += SuperLU_timer_()-tt1; */
/* ================== */
/* == post receive == */
/* ================== */
kk1 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = k0 + 1; kk0 <= kk1; kk0++) {
kk = perm_c_supno[kk0];
kcol = PCOL (kk, grid);
if (look_ahead[kk] == k0) {
if (mycol != kcol) {
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
look_id = kk0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
} else {
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
lusup1 = Lnzval_bc_ptr[lk];
if (factored[kk] == -1) {
/* Factor diagonal and subdiagonal blocks and
test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh,
Glu_persist, grid, Llu, U_diag_blk_send_req,
tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Process column *kcol+1* multicasts numeric
values of L(:,k+1) to process rows. */
look_id = kk0 % (1 + num_look_aheads);
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
}
}
} /* for pj ... */
}
}
}
double tsch = SuperLU_timer_();
/************************************************************************/
#ifdef GPU_ACC
#include "dSchCompUdt-cuda.c"
#else
/*#include "SchCompUdt--Phi-2Ddynamic-alt.c"*/
#include "dSchCompUdt-2Ddynamic.c"
#endif
/*uncomment following to compare against SuperLU 3.3 baseline*/
/* #include "SchCompUdt--baseline.c" */
/************************************************************************/
NetSchurUpTimer += SuperLU_timer_()-tsch;
} /* for k0 = 0, ... */
/* ##################################################################
** END MAIN LOOP: for k0 = ...
################################################################## */
pdgstrfTimer= SuperLU_timer_()-pdgstrfTimer;
/* updating total flops */
#if ( PRNTlevel>=1 )
if (!iam) {
printf("Time in scattering %lf \n",scatter_timer );
printf("Time in dgemm %lf \n", gemm_timer );
printf("Total time spent in schur update is \t\t: %5.2lf seconds,\n",NetSchurUpTimer );
printf("Total Time in Factorization \t\t: %5.2lf seconds, \n", pdgstrfTimer);
printf("Time (other GEMM and Scatter) \t\t: %5.2lf seconds, \n", pdgstrfTimer-schur_flop_timer);
printf("Total time spent in schur update when offload \t\t: %5.2lf seconds,\n",CPUOffloadTimer );
}
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks(iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks(iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
// printf("Debug : MPI buffers 1\n");
/********************************************************
* Free memory *
********************************************************/
if (Pr * Pc > 1) {
SUPERLU_FREE (Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */
SUPERLU_FREE (Lval_buf_2[0]); /* also free Lval_buf_2[1] */
if (Llu->bufmax[2] != 0)
SUPERLU_FREE (Usub_buf_2[0]);
if (Llu->bufmax[3] != 0)
SUPERLU_FREE (Uval_buf_2[0]);
if (U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL) {
/* wait for last Isend requests to complete, deallocate objects */
for (krow = 0; krow < Pr; ++krow) {
if (krow != myrow)
MPI_Wait (U_diag_blk_send_req + krow, &status);
}
}
SUPERLU_FREE (U_diag_blk_send_req);
}
log_memory( -((Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword),
stat );
SUPERLU_FREE (Lsub_buf_2);
SUPERLU_FREE (Lval_buf_2);
SUPERLU_FREE (Usub_buf_2);
SUPERLU_FREE (Uval_buf_2);
SUPERLU_FREE (perm_c_supno);
SUPERLU_FREE (perm_u);
#ifdef ISORT
SUPERLU_FREE (iperm_u);
#endif
SUPERLU_FREE (look_ahead);
SUPERLU_FREE (factoredU);
SUPERLU_FREE (factored);
log_memory(-(6 * nsupers * iword), stat);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (msgcnts[i]);
SUPERLU_FREE (msgcntsU[i]);
}
SUPERLU_FREE (msgcnts);
SUPERLU_FREE (msgcntsU);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (send_reqs_u[i]);
SUPERLU_FREE (recv_reqs_u[i]);
SUPERLU_FREE (send_reqs[i]);
SUPERLU_FREE (recv_reqs[i]);
}
SUPERLU_FREE (recv_reqs_u);
SUPERLU_FREE (send_reqs_u);
SUPERLU_FREE (recv_reqs);
SUPERLU_FREE (send_reqs);
// printf("Debug : MPI buffers 3\n");
#ifdef GPU_ACC
checkCuda (cudaFreeHost (bigV));
checkCuda (cudaFreeHost (bigU));
cudaFree( (void*)dA ); /* Sherry added */
cudaFree( (void*)dB );
cudaFree( (void*)dC );
SUPERLU_FREE( handle );
SUPERLU_FREE( streams );
#else
SUPERLU_FREE (bigV);
SUPERLU_FREE (bigU);
#endif
log_memory(-(bigv_size + bigu_size) * dword, stat);
// printf("Debug : MPI buffers 5\n");
SUPERLU_FREE (Llu->ujrow);
SUPERLU_FREE (tempv2d);
SUPERLU_FREE (indirect);
SUPERLU_FREE (indirect2); /* Sherry added */
SUPERLU_FREE (iuip);
SUPERLU_FREE (ruip);
ldt = sp_ienv_dist(3);
log_memory( -(3 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword), stat );
/* Sherry added */
SUPERLU_FREE(omp_loop_time);
SUPERLU_FREE(full_u_cols);
SUPERLU_FREE(blk_ldu);
log_memory(-2 * ncb * dword, stat);
SUPERLU_FREE(stream_end_col);
SUPERLU_FREE(lookAheadFullRow);
SUPERLU_FREE(lookAheadStRow);
SUPERLU_FREE(lookAhead_lptr);
SUPERLU_FREE(lookAhead_ib);
SUPERLU_FREE(RemainFullRow);
SUPERLU_FREE(RemainStRow);
SUPERLU_FREE(Remain_lptr);
SUPERLU_FREE(Remain_ib);
SUPERLU_FREE(Remain_info);
SUPERLU_FREE(lookAhead_L_buff);
SUPERLU_FREE(Remain_L_buff);
log_memory( -(4 * mrb * iword + mrb * sizeof(Remain_info_t) +
ldt * ldt * (num_look_aheads + 1) * dword +
Llu->bufmax[1] * dword), stat );
SUPERLU_FREE(Ublock_info);
SUPERLU_FREE(Ublock_info_iukp);
SUPERLU_FREE(Ublock_info_rukp);
SUPERLU_FREE(Ublock_info_jb);
#if ( PROFlevel>=1 )
TIC (t1);
#endif
/* Prepare error message - find the smallest index i such that U(i,i)==0. */
if ( *info == 0 ) *info = n + 1;
MPI_Allreduce (info, &iinfo, 1, MPI_INT, MPI_MIN, grid->comm);
if ( iinfo == n + 1 ) *info = 0;
else *info = iinfo;
// printf("test out\n");
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
{
float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum;
MPI_Reduce (&msg_cnt, &msg_cnt_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_cnt, &msg_cnt_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
if (!iam) {
printf ("\tPDGSTRF comm stat:"
"\tAvg\tMax\t\tAvg\tMax\n"
"\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n",
msg_cnt_sum / Pr / Pc, msg_cnt_max,
msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6);
}
}
#endif
#if ( PRNTlevel==3 )
MPI_Allreduce (&zero_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # msg of zero size\t%d\n", iinfo);
MPI_Allreduce (&total_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # total msg\t%d\n", iinfo);
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks (iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks (iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
#if ( DEBUGlevel>=3 )
printf ("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update);
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC (iam, "Exit pdgstrf()");
#endif
return 0;
} /* PDGSTRF */
|
main.c | #include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <string.h>
#include "lcg.h"
#include "utilities.h"
#include "render.h"
#define NUM_OF_THREADS 8
#define NUM_OF_ITERATIONS 5000
void citiesInitialization(LCG *lcg,city *cities);
void constructGraph(city *cities,double **adjMatrix);
void pathInitiliazation(LCG *lcg,int *path);
double calcPathDistance(double **adjMatrix,int *path);
double calcDistanceSquared(city c1,city c2);
double calcDistance(city c1,city c2);
void shareData(int path[NUM_OF_THREADS][NUM_OF_CITIES],double pathDistance[NUM_OF_THREADS],int sharedPath[NUM_OF_CITIES],double sharedPathDistance);
/*
 * Entry point: parallel local search for a short closed tour (TSP-style).
 *
 * Each OpenMP thread repeatedly perturbs its own copy of the tour by
 * swapping two cities and keeps the swap only if it shortens the tour.
 * After NUM_OF_ITERATIONS per batch, the best tour across all threads is
 * broadcast back to every thread via shareData(), the result is drawn with
 * the SDL-based renderer (render.h), and the next batch starts.  The loop
 * ends when the window is closed or Escape is pressed.
 */
int main(int argc, char *argv[]){
    city cities[NUM_OF_CITIES];
    double **adjMatrix=doubleArray2DInHeap(NUM_OF_CITIES,NUM_OF_CITIES);
    int path[NUM_OF_THREADS][NUM_OF_CITIES];   /* one candidate tour per thread */
    double oldPathDistance[NUM_OF_THREADS];    /* best accepted distance per thread */
    double newPathDistance[NUM_OF_THREADS];    /* distance of the current trial swap */
    int batch=0;
    int it;
    int a,b;
    int temp;
    int threadID;
    char message[100];
    /* Linear congruential generator: {multiplier, increment, modulus, seed}. */
    LCG lcg=(LCG) {44485709377909ULL,11863279ULL,281474976710656ULL,time(NULL)};
    //Core logic=======================================================================================
    citiesInitialization(&lcg,cities);
    constructGraph(cities,adjMatrix);
    pathInitiliazation(&lcg,path[0]);
    /* Seed every thread with the same initial tour and its distance. */
    shareData(path,oldPathDistance,path[0],calcPathDistance(adjMatrix,path[0]));
    init();
    // In the first iteration, build the city grid without any connections
    buildCityGrid(cities, NUM_OF_CITIES);
    int quit = 0;
    SDL_Event e;
    #pragma omp parallel num_threads(NUM_OF_THREADS) private(it,a,b,temp,threadID,lcg)
    {
        threadID=omp_get_thread_num();
        /* NOTE(review): the seed member is initialized with the ADDRESS of the
           thread-private threadID (a pointer-to-integer conversion), relying on
           per-thread stack addresses differing.  Non-portable; presumably
           intentional per the comment below — confirm. */
        lcg=(LCG) {44485709377909ULL,11863279ULL,281474976710656ULL,&threadID}; //Each thread gets a different seed
        while(!quit){
            /* One thread reports progress, redraws the scene, and polls SDL
               events; the implicit barrier of `single` keeps the others in step. */
            #pragma omp single
            {
                sprintf(message,"Batch %d, Path Distance = %.2f \n",batch,oldPathDistance[0]); //Each entry in oldPathDistance contains the minimum
                printf("%s",message);
                clearWindow();
                updateIterationText(message);
                buildCityGrid(cities, NUM_OF_CITIES);
                connectCities(cities, path);
                while (SDL_PollEvent(&e) != 0)
                {
                    if (e.type == SDL_QUIT)
                    {
                        quit = 1;
                    }
                    else if (e.type == SDL_KEYDOWN && e.key.keysym.sym == SDLK_ESCAPE)
                    {
                        quit = 1;
                    }
                }
            }
            /* Hill-climbing phase: each thread works only on its own row of
               path[] and its own slots of old/newPathDistance — no races. */
            for(it=0; it<NUM_OF_ITERATIONS; it++){
                //Exchange two cities
                a=randINTBetween(&lcg,1,NUM_OF_CITIES-1);
                b=randINTBetween(&lcg,1,NUM_OF_CITIES-1);
                temp=path[threadID][a];
                path[threadID][a]=path[threadID][b];
                path[threadID][b]=temp;
                //Compare path distances
                newPathDistance[threadID]=calcPathDistance(adjMatrix,path[threadID]);
                if(oldPathDistance[threadID]<newPathDistance[threadID]){
                    //Exchange back the two cities
                    path[threadID][b]=path[threadID][a];
                    path[threadID][a]=temp;
                }
                else{
                    oldPathDistance[threadID]=newPathDistance[threadID];
                }
            }
            #pragma omp barrier
            //Find the best thread
            #pragma omp single
            {
                int ind=0;
                double min=oldPathDistance[0];
                for(int thread=1; thread<NUM_OF_THREADS; thread++){
                    if(oldPathDistance[thread]<min){
                        ind=thread;
                        min=oldPathDistance[thread];
                    }
                }
                /* Broadcast the winning tour/distance to every thread. */
                shareData(path,oldPathDistance,path[ind],min);
            }
            #pragma omp single
            batch++;
        }
    }
    //=================================================================================================
    shutDown();
    return 0;
}
/* Scatter every city uniformly at random over the [0,X_MAX] x [0,Y_MAX]
   drawing area, consuming values from the supplied generator. */
void citiesInitialization(LCG *lcg,city *cities){
    int i;
    for(i=0; i<NUM_OF_CITIES; i++){
        cities[i].x=randDBLBetween(lcg,0.0,(double) X_MAX);
        cities[i].y=randDBLBetween(lcg,0.0,(double) Y_MAX);
    }
}
/*
 * Fill adjMatrix with pairwise Euclidean distances between cities.
 *
 * Improvement: the distance is symmetric and zero on the diagonal, so each
 * pair is computed once and mirrored — half the calcDistance()/sqrt calls
 * of the original double loop, with bit-identical results.
 */
void constructGraph(city *cities, double **adjMatrix){
    int a;
    int b;
    for(a=0; a<NUM_OF_CITIES; a++){
        adjMatrix[a][a]=0.0;    /* calcDistance(c,c) is always 0 */
        for(b=a+1; b<NUM_OF_CITIES; b++){
            double d=calcDistance(cities[a],cities[b]);
            adjMatrix[a][b]=d;
            adjMatrix[b][a]=d;  /* symmetric: d(a,b) == d(b,a) */
        }
    }
}
/*
 * Build a random initial tour and rotate it so city 0 is first.
 *
 * randomPermutationFrom0toN() is expected to write a permutation of
 * 0..NUM_OF_CITIES-1 into path; the search loop then locates city 0 and
 * swaps it to the front (the hill climber never perturbs index 0).
 *
 * Fix: the original swapped unconditionally after the scan, so if 0 were
 * ever absent the loop would finish with a == NUM_OF_CITIES and the swap
 * would read/write path[NUM_OF_CITIES] out of bounds.  The swap is now
 * guarded; behavior is unchanged whenever 0 is present, as expected.
 */
void pathInitiliazation(LCG *lcg,int *path){
    int a;
    int temp;
    randomPermutationFrom0toN(lcg,NUM_OF_CITIES-1,path);
    for(a=0; a<NUM_OF_CITIES; a++){
        if( path[a]==0 ){
            break;
        }
    }
    if(a<NUM_OF_CITIES){   /* defensive: only swap if 0 was actually found */
        temp=path[0];
        path[0]=path[a];
        path[a]=temp;
    }
}
/* Total length of the closed tour described by path: the sum of every
   consecutive edge plus the edge closing the cycle back to path[0].
   Distances are looked up in the precomputed adjacency matrix. */
double calcPathDistance(double **adjMatrix,int *path){
    double total=0.0;
    int i=0;
    while(i<NUM_OF_CITIES-1){
        total+=adjMatrix[path[i]][path[i+1]];
        i++;
    }
    total+=adjMatrix[path[NUM_OF_CITIES-1]][path[0]];   /* closing edge */
    return total;
}
/* Squared Euclidean distance between two cities (avoids the sqrt when
   only relative comparisons are needed). */
double calcDistanceSquared(city c1,city c2){
    const double dx=c1.x-c2.x;
    const double dy=c1.y-c2.y;
    return dx*dx+dy*dy;
}
/* Euclidean distance between two cities. */
double calcDistance(city c1,city c2){
    const double d2=calcDistanceSquared(c1,c2);
    return sqrt(d2);
}
/* Broadcast one tour and its distance to every thread's slot: copy
   sharedPath into each row of path and sharedPathDistance into each
   entry of pathDistance. */
void shareData(int path[NUM_OF_THREADS][NUM_OF_CITIES],double pathDistance[NUM_OF_THREADS],int sharedPath[NUM_OF_CITIES],double sharedPathDistance){
    int t=0;
    while(t<NUM_OF_THREADS){
        memcpy(path[t],sharedPath,NUM_OF_CITIES*sizeof(int));
        pathDistance[t]=sharedPathDistance;
        t++;
    }
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the potential colorspace of the image: images identified as
  bilevel or grayscale (with or without alpha) report GRAYColorspace;
  everything else reports the colorspace already recorded on the image.
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  type=IdentifyImageType(image,exception);
  switch (type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(GRAYColorspace);
    default:
      return(image->colorspace);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Map an Adobe RGB (1998) triplet to sRGB through the XYZ connection
   space. */
static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double X, Y, Z;

  ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/* Map a Display P3 triplet to sRGB through the XYZ connection space. */
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double X, Y, Z;

  ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/* Map a ProPhoto RGB triplet to sRGB through the XYZ connection space. */
static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double X, Y, Z;

  ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/*
  Convert quantum-range sRGB to subtractive CMY: each channel is the
  complement of the corresponding RGB channel, scaled by QuantumScale
  into [0,1].
*/
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}
/* Map an sRGB triplet to Adobe RGB (1998) through the XYZ connection
   space. */
static void ConvertRGBToAdobe98(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double X, Y, Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}
/* Map an sRGB triplet to Display P3 through the XYZ connection space. */
static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double X, Y, Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}
/* Map an sRGB triplet to ProPhoto RGB through the XYZ connection space. */
static void ConvertRGBToProPhoto(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double X, Y, Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}
/*
  Linear transform from CIE XYZ to LMS cone responses.  Each output is a
  row of a fixed 3x3 matrix applied to (x,y,z); the terms are reordered
  commutatively, which is bit-identical in IEEE arithmetic.
*/
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.4296*y+0.7328*x-0.1624*z;
  *M=1.6975*y-0.7036*x+0.0061*z;
  *S=0.0136*y+0.0030*x+0.9834*z;
}
/* Map an sRGB triplet to LMS cone space through the XYZ connection
   space. */
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double X, Y, Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}
/* Map an sRGB triplet to CIE L*u*v* under the given reference
   illuminant, through the XYZ connection space. */
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,const IlluminantType illuminant,double *L,double *u,
  double *v)
{
  double X, Y, Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}
/* Map an sRGB triplet to CIE xyY chromaticity coordinates: x and y are
   X and Y normalized by X+Y+Z (via PerceptibleReciprocal, which guards
   against division by near-zero); cap_Y carries the luminance. */
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    norm,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  norm=PerceptibleReciprocal(X+Y+Z);
  *low_x=norm*X;
  *low_y=norm*Y;
  *cap_Y=Y;
}
/*
  Convert absolute CIE XYZ (D65) to the perceptually uniform Jzazbz space.
  Jz is lightness; az (red-green) and bz (yellow-blue) are opponent axes,
  biased here by +0.5 into [0,1].  white_luminance scales the PQ-style
  non-linearity applied to the LMS channels.
*/
static void inline ConvertXYZToJzazbz(const double X,const double Y,
  const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15  /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)

  double
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  /* Pre-adapt X and Y (constants b, g) before the cone transform. */
  Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
  Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
  Zp=Z;
  /* Linear adapted-XYZ -> LMS cone responses. */
  L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
  M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
  S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
  /* Apply the PQ-style transfer (constants c1..c3, n, p) per channel,
     normalized by the white luminance. */
  gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  /* Opponent decomposition; Iz is the achromatic intensity. */
  Iz=0.5*Lp+0.5*Mp;
  *az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
  *bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
  /* Lightness compression; d and d0 tune the near-black response. */
  *Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
/*
  Inverse of ConvertXYZToJzazbz(): recover absolute CIE XYZ from Jzazbz.
  az/bz are expected with the +0.5 bias applied by the forward transform;
  the Jzazbz_* macros are those defined by ConvertXYZToJzazbz() above.
*/
static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    azz,
    bzz,
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  /* Undo the lightness compression to recover Iz. */
  gamma=Jz+Jzazbz_d0;
  Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
  /* Remove the +0.5 bias from the opponent channels. */
  azz=az-0.5;
  bzz=bz-0.5;
  /* Inverse opponent decomposition back to non-linear LMS. */
  Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
  Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
  Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
  /* Invert the PQ-style transfer per channel, rescaling by the white
     luminance. */
  gamma=pow(Lp,1.0/Jzazbz_p);
  L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Mp,1.0/Jzazbz_p);
  M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  gamma=pow(Sp,1.0/Jzazbz_p);
  S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
    Jzazbz_n);
  /* Linear LMS -> adapted XYZ, then undo the b/g pre-adaptation (note
     *Y depends on *X, so X must be computed first). */
  Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
  Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
  Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
  *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
  *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
  *Z=Zp;
}
/*
  Map an sRGB triplet to Jzazbz through the XYZ connection space.
  white_luminance scales the PQ non-linearity (see ConvertXYZToJzazbz).

  Fix: the green and blue arguments were transposed in the call to
  ConvertRGBToXYZ() (red,blue,green), which corrupted the chroma axes;
  the channels are now passed in the correct order.
*/
static void ConvertRGBToJzazbz(const double red,const double green,
  const double blue,const double white_luminance,double *Jz,double *az,
  double *bz)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
/*
  Map a Jzazbz triplet back to sRGB through the XYZ connection space.
  white_luminance must match the value used in the forward transform.

  Fix: the green and blue OUTPUT pointers were transposed in the call to
  ConvertXYZToRGB() (red,blue,green), writing the green result into
  *blue and vice versa; the pointers are now passed in the correct order.
*/
static void ConvertJzazbzToRGB(const double Jz,const double az,
  const double bz,const double white_luminance,double *red,double *green,
  double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/*
  Convert quantum-range sRGB to SECAM YDbDr.  Y is luma scaled to [0,1];
  Db and Dr are color-difference channels biased by +0.5 to fit in [0,1].
*/
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
/*
  Convert quantum-range sRGB to NTSC YIQ.  Y is luma scaled to [0,1];
  I and Q are the chrominance channels biased by +0.5 to fit in [0,1].
*/
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
/*
  Convert quantum-range sRGB to analog-component YPbPr (BT.601 weights).
  Y is luma scaled to [0,1]; Pb and Pr are color-difference channels
  biased by +0.5 to fit in [0,1].
*/
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
/*
  Convert quantum-range sRGB to YCbCr.  The matrix is identical to the
  YPbPr transform used here (chroma already biased by +0.5), so this
  simply delegates to ConvertRGBToYPbPr().
*/
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
/*
  Convert quantum-range sRGB to PAL YUV.  Y is luma scaled to [0,1];
  U and V are the chrominance channels biased by +0.5 to fit in [0,1].
*/
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelGray(image,ClampToQuantum(DecodePixelGamma(gray)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case Adobe98Colorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from sRGB to target colorspace.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case Adobe98Colorspace:
{
ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
break;
}
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case DisplayP3Colorspace:
{
ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case JzazbzColorspace:
{
ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case ProPhotoColorspace:
{
ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
PerceptibleReciprocal(film_gamma)))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) DecodePixelGamma((MagickRealType)
GetPixelRed(image,q));
green=(double) DecodePixelGamma((MagickRealType)
GetPixelGreen(image,q));
blue=(double) DecodePixelGamma((MagickRealType)
GetPixelBlue(image,q));
SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
q);
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
x_map[i].y=(-0.003296)*i;
x_map[i].z=0.009410*i;
y_map[i].x=0.010566*i;
y_map[i].y=(-0.006471)*i;
y_map[i].z=(-0.007880)*i;
z_map[i].x=0.002052*i;
z_map[i].y=0.009768*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
x_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].x=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
z_map[i].y=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
unsigned int
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(image,q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(image,q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(image,q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
primary_info.z;
SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
unsigned int
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    image_type;

  MagickBooleanType
    status;

  /*
    Tag the image with a new colorspace and reset the colorimetric metadata
    (gamma, rendering intent, chromaticity) to values appropriate for it.
    Pixel data is not converted here.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* nothing to change */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  image_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      image_type=GrayscaleType;
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;  /* linear gray: no encoding gamma */
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear spaces carry unity gamma */
    else
      {
        /*
          Other colorspaces: perceptual intent with sRGB primaries and a
          D65-style white point (0.3127, 0.3290).
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,exception);
  image->type=image_type;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;

  ImageType
    gray_type;

  /*
    Reclassify the image as grayscale when every pixel has equal red, green,
    and blue components: the colorspace becomes GRAY and the image type is
    set to the identified bi-level or grayscale type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);  /* already classified as gray */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);  /* auto-grayscale explicitly disabled */
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;

  /*
    Reclassify the image as bi-level when every pixel is either full black
    or full white: the colorspace becomes GRAY and the type BilevelType.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already bi-level */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);  /* auto-grayscale explicitly disabled */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Convert the pixel data to the requested colorspace, pivoting through
    sRGB when neither the source nor the target is sRGB.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Any embedded color profiles no longer describe the transformed pixels.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));  /* target is sRGB */
  /*
    Bring the image to sRGB first (if it is not already), then apply the
    sRGB-to-target transform.
  */
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  return(sRGBTransformImage(image,colorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  /*
    Invert a normalized CMY triplet into RGB: each channel is the complement
    of the corresponding ink value, scaled to the quantum range.
  */
  *red=(1.0-cyan)*QuantumRange;
  *green=(1.0-magenta)*QuantumRange;
  *blue=(1.0-yellow)*QuantumRange;
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  double
    x,
    y,
    z;

  /*
    Apply the fixed 3x3 linear transform taking LMS cone responses back to
    CIE XYZ (the inverse of the XYZ-to-LMS matrix used by this module).
  */
  x=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
  *X=x;
  *Y=y;
  *Z=z;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    x,
    y,
    z;

  /*
    Convert LMS to RGB by pivoting through XYZ.
  */
  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    scaled_L,
    scaled_u,
    scaled_v,
    X,
    Y,
    Z;

  /*
    Undo the [0,1] normalization applied when Luv was stored (L back to
    [0,100], u to [-134,220], v to [-140,122]) and pivot through XYZ.
  */
  scaled_L=100.0*L;
  scaled_u=354.0*u-134.0;
  scaled_v=262.0*v-140.0;
  ConvertLuvToXYZ(scaled_L,scaled_u,scaled_v,illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  /*
    Round a YCC map coordinate to the nearest integer index, clamped to the
    range [0,1388] (the YCCMap table in this file has 1389 entries).
  */
  if (value <= 0.0)
    return(0);
  return(value >= 1388.0 ? 1388 : (ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    scaled_a,
    scaled_b,
    X,
    Y,
    Z;

  /*
    Undo the [0,1] normalization applied when Lab was stored (L back to
    [0,100], a and b recentered to [-127.5,127.5]) and pivot through XYZ.
  */
  scaled_a=255.0*(a-0.5);
  scaled_b=255.0*(b-0.5);
  ConvertLabToXYZ(100.0*L,scaled_a,scaled_b,illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    scale,
    X,
    Y,
    Z;

  /*
    Recover XYZ from chromaticity (x,y) plus luminance Y:
    X = Y*x/y and Z = Y*(1-x-y)/y.  PerceptibleReciprocal() guards against
    a zero (or denormal) y denominator.
  */
  scale=PerceptibleReciprocal(low_y)*cap_Y;
  X=scale*low_x;
  Y=cap_Y;
  Z=scale*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  double
    pb,
    pr;

  /*
    Inverse Y'PbPr transform; the chroma channels are stored biased by +0.5
    and are recentered before the matrix is applied.  Results are scaled to
    the quantum range.
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  /*
    This code path decodes Y'CbCr with the identical inverse matrix used
    for Y'PbPr, so simply delegate.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  double
    i,
    q;

  /*
    Inverse YIQ transform; I and Q are stored biased by +0.5 and are
    recentered before the matrix is applied.  Results are scaled to the
    quantum range.
  */
  i=I-0.5;
  q=Q-0.5;
  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  double
    db,
    dr;

  /*
    Inverse YDbDr transform; Db and Dr are stored biased by +0.5 and are
    recentered before the matrix is applied.  Results are scaled to the
    quantum range.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  double
    u,
    v;

  /*
    Inverse YUV transform; U and V are stored biased by +0.5 and are
    recentered before the matrix is applied.  Results are scaled to the
    quantum range.
  */
  u=U-0.5;
  v=V-0.5;
  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
gray=EncodePixelGamma(gray);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma))-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
monte_carlo.h | #ifndef monte_carlo_h
#define monte_carlo_h
#include <omp.h>
#include <algorithm>
#include <armadillo>
#include <cassert>
#include <chrono>
#include <cmath>
#include <experimental/filesystem>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <list>
#include <map>
#include <sstream>
#include <thread>
#include "../helper/utility.h"
#include "../helper/prepare_directory.hpp"
#include "../helper/constants.h"
#include "../helper/progress.hpp"
#include "../../lib/json.hpp"
#include "../exciton_transfer/cnt.h"
#include "../exciton_transfer/exciton_transfer.h"
#include "./particle.h"
#include "./scatterer.h"
#include "./scattering_struct.h"
namespace mc
{
class monte_carlo {
private:
// convenience aliases for filesystem paths and geometric helpers
typedef std::experimental::filesystem::path path_t;
typedef std::experimental::filesystem::directory_entry directory_t;
// a spatial domain described by its (min, max) corner coordinate vectors
typedef std::pair<arma::vec, arma::vec> domain_t;
// spatial partition of scatterer pointers for fast neighbor lookup
typedef std::vector<std::vector<scatterer*>> bucket_t;
typedef std::pair<double, double> limit_t;
// elapsed simulation time
double _time;
// maximum hopping radius considered in the simulation
double _max_hopping_radius;
// input properties of the whole mc simulation in json format
nlohmann::json _json_prop;
// list of all scatterer object in the simulation
std::vector<scatterer> _all_scat_list;
// list of quenching sites in the simulation
std::vector<scatterer> _quenching_list;
// minimum and maximum coordinates of the simulation domain
domain_t _domain;
// number of particles in the contacts
unsigned _c1_pop, _c2_pop;
// this is the address of the output_directory and input_directory
directory_t _output_directory, _input_directory;
// instantiation of the scattering table for discrete mesh points
std::vector<std::vector<scattering_struct>> _scat_tables;
// pointers to scatterers to divide the scatterers into multiple buckets based on their position in space
bucket_t _scat_buckets;
// pointers to quenching sites to divide the quenching sites into multiple buckets based on their position in space
bucket_t _q_buckets;
// pointers to scatterers in contact 1 and 2
std::vector<const scatterer*> _c1_scat, _c2_scat;
// number of segments that defines contacts
unsigned _n_seg=0;
// area profile of the structure along the y-axis
std::vector<double> _area;
// list of all particles in the simulation
std::vector<particle> _particle_list;
// file objects for saving population profile and current data
std::fstream _pop_file, _curr_file;
// particle velocity (initialized to zero)
double _particle_velocity=0;
//**************************************************************
// this section holds variables specific to the green-kubo method
// of calculating diffusion coefficient
//**************************************************************
// list of scatterers in the injection region
std::vector<const scatterer *> _inject_scats;
// domain limits to remove the particles and inject them in the injection region
domain_t _removal_domain;
// maximum time for the kubo simulation
double _max_time;
// files to record particle displacements along the x, y, and z axes
std::fstream _displacement_file_x, _displacement_file_y, _displacement_file_z;
// file to record average of square of displacements in each direction
std::fstream _displacement_squard_file;
// output file for diffusion tensor data
std::fstream _diffusion_tensor_file;
// output file for diffusion length data
std::fstream _diffusion_length_file;
//**************************************************************
//**************************************************************
public:
// default constructor
monte_carlo() = delete;
// constructor with json input file: stores the parsed properties and sets up
// the output and mesh-input directories
monte_carlo(const nlohmann::json& j) {
  std::cout << "\n"
            << "ready properties from json file"
            << "\n";

  // keep the whole property tree for use by init() and the table builders
  _json_prop = j;

  // the output directory is optionally cleared, controlled by the
  // "keep old results" flag (defaults to keeping existing data)
  std::string out_dir = j["output directory"];
  bool keep_old_data = true;
  if (j.count("keep old results") == 1) {
    keep_old_data = j["keep old results"];
  }
  _output_directory = prepare_directory(out_dir, keep_old_data);

  // directory holding the mesh description files
  std::string mesh_dir = j["mesh input directory"];
  _input_directory = check_directory(mesh_dir, false);
};
// elapsed simulation time [s]
const double& time() const { return _time; };
// constant reference to the output directory entry
const directory_t& output_directory() const { return _output_directory; };
// constant reference to the input (mesh) directory entry
const directory_t& input_directory() const { return _input_directory; };
// filesystem path of the output directory
const path_t& output_path() const { return _output_directory.path(); };
// filesystem path of the input (mesh) directory
const path_t& input_path() const { return _input_directory.path(); };
// current number of particles in the simulation
unsigned number_of_particles() const { return _particle_list.size(); };
// compute the diameter of a (m,n) carbon nanotube from its chirality.
// note: despite the original "[nm]" comments, a_cc is given in meters, so the
// returned diameter is in meters as well.
double calc_diam(int _m, int _n) {
  const double a_cc = 1.42e-10;                         // carbon-carbon bond length [m]
  const double lattice = std::sqrt(float(3.0)) * a_cc;  // graphene lattice constant [m]
  const double circumference =
      lattice * std::sqrt(float(_n * _n + _m * _m + _n * _m));
  const double pi = 3.141592;
  return circumference / pi;
}
// initialize the simulation condition: read all run parameters from the
// stored json properties, build the scattering tables and scatterer list,
// and create the initial particle distribution.
void init() {
// hopping candidates are only searched within this radius
_max_hopping_radius = double(_json_prop["max hopping radius [m]"]);
std::cout << "maximum hopping radius: " << _max_hopping_radius * 1.e9 << " [nm]\n";
_particle_velocity = _json_prop["exciton velocity [m/s]"];
std::cout << "exciton velocity [m/s]: " << _particle_velocity << std::endl;
// number of y-axis segments used for contacts, population and current profiles
_n_seg = _json_prop["number of segments"];
std::cout << "number of segments: " << _n_seg << std::endl;
_scat_tables = create_scattering_table(_json_prop);
_all_scat_list = create_scatterers(_input_directory.path());
// drop scatterers outside the user-specified trim box
limit_t xlim = _json_prop["trim limits"]["xlim"];
limit_t ylim = _json_prop["trim limits"]["ylim"];
limit_t zlim = _json_prop["trim limits"]["zlim"];
trim_scats(xlim, ylim, zlim, _all_scat_list);
std::cout << "total number of scatterers: " << _all_scat_list.size() << std::endl;
// NOTE(review): only the first scattering table is assigned to every
// scatterer -- confirm this is intended when several tables exist
set_scat_table(_scat_tables[0][0], _all_scat_list);
_domain = find_simulation_domain();
_area = get_area(_n_seg);
get_scatterer_statistics(_n_seg, _area);
create_scatterer_buckets(_domain, _max_hopping_radius, _all_scat_list, _scat_buckets, _quenching_list, _q_buckets);
set_max_rate(_max_hopping_radius, _all_scat_list);
// scatterers belonging to the first and last segments act as contacts
_c1_scat = contact_scats(_all_scat_list, _n_seg, 1, _domain);
_c2_scat = contact_scats(_all_scat_list, _n_seg, _n_seg, _domain);
// hard-coded initial contact populations
_c1_pop = 1100;
_c2_pop = 0;
_particle_list = create_particles(_domain, _n_seg, _all_scat_list, _c1_pop, _c2_pop);
};
// read in the coordinate of all the cnt segments or molecules and create the scatterer objects that manage
// particle hopping between the sites
std::vector<scatterer> create_scatterers(const path_t& input_path){
std::cout << std::endl << "create scatterers in fiber structure ... " << std::flush;
std::ifstream pos_file;
std::ifstream orient_file;
std::ifstream chiral1_file;
std::ifstream chiral2_file;
// x axis
pos_file.open(input_path / "single_cnt.pos.x.dat");
orient_file.open(input_path / "single_cnt.orient.x.dat");
arma::mat xcoor;
xcoor.load(pos_file);
xcoor *= 1.e-9;
arma::mat xorient;
xorient.load(orient_file);
pos_file.close();
orient_file.close();
// y axis
pos_file.open(input_path / "single_cnt.pos.y.dat");
orient_file.open(input_path / "single_cnt.orient.y.dat");
arma::mat ycoor;
ycoor.load(pos_file);
ycoor *= 1.e-9;
arma::mat yorient;
yorient.load(orient_file);
pos_file.close();
orient_file.close();
// z axis
pos_file.open(input_path / "single_cnt.pos.z.dat");
orient_file.open(input_path / "single_cnt.orient.z.dat");
arma::mat zcoor;
zcoor.load(pos_file);
zcoor *= 1.e-9;
arma::mat zorient;
zorient.load(orient_file);
pos_file.close();
orient_file.close();
// chiral 1
chiral1_file.open(input_path / "single_cnt.chiral.1.dat");
chiral2_file.open(input_path / "single_cnt.chiral.2.dat");
arma::mat chiral1;
chiral1.load(chiral1_file);
arma::mat chiral2;
chiral2.load(chiral2_file);
chiral1_file.close();
chiral2_file.close();
std::vector<scatterer> scat_list(xcoor.n_elem);
for (unsigned i = 0; i < xcoor.n_rows; ++i) {
for (unsigned j = 0; j < xcoor.n_cols; ++j) {
unsigned n = i * xcoor.n_cols + j;
scat_list[n].set_pos({xcoor(i, j), ycoor(i, j), zcoor(i, j)});
scat_list[n].set_orientation({xorient(i, j), yorient(i, j), zorient(i, j)});
scat_list[n].set_chirality({4,2}); //TODO set to true chirality afterwards
if (j > 0) {
scat_list[n].left = n - 1;
}
if (j + 1 < xcoor.n_cols) {
scat_list[n].right = n + 1;
}
}
}
std::cout << "done!!!"
<< std::endl
<< std::endl
<< "total number of scatterers: " << scat_list.size()
<< std::endl;
return scat_list;
}
// read in the coordinate of all the cnt segments or molecules and create the scatterer objects that manage
// particle hopping between the sites
std::vector<scatterer> create_quenching_sites(const std::vector<scatterer>& scat_list, int num_quenching){
std::cout << std::endl << "create quenching sites in fiber structure ... " << std::flush;
std::vector<scatterer> q_list(num_quenching);
for (int n=0; n<num_quenching; n++){
int dice = std::rand() % scat_list.size();
const scatterer* s = &scat_list[dice];
arma::vec pos = s->pos();
arma::vec chirality = s->chirality();
arma::vec orientation = s->orientation();
double diameter = calc_diam(chirality[0],chirality[1]);
arma::vec dia_vec = {diameter/2, 0, 0};
arma::vec new_pos = pos + dia_vec;
q_list[n].set_quenching();
q_list[n].set_pos(new_pos);
}
std::cout << "done!!!"
<< std::endl
<< std::endl
<< "total number of quenching sites: " << q_list.size()
<< std::endl;
return q_list;
}
// create particles with a linear density profile in y direction
std::vector<particle> create_particles( const domain_t& domain, const unsigned n_seg,
const std::vector<scatterer>& scat_list, int left_pop, int right_pop) {
std::cout << "\n"
<< "create particles list:...";
std::vector<particle> p_list;
double y_min = domain.first(1);
double y_max = domain.second(1);
double dy = (y_max - y_min) / double(n_seg);
double dp = double(right_pop - left_pop) / (double(n_seg) - 1);
for (unsigned i=0; i<n_seg; ++i){
int n_particle = std::round(left_pop + double(i) * dp);
std::cout << "("<< i << "," << n_particle << ") ,";
double y1 = y_min + double(i) * dy;
double y2 = y1 + dy;
std::vector<const scatterer*> s_list;
for (const scatterer& s: scat_list){
if (y1<=s.pos(1) && s.pos(1)<y2){
s_list.emplace_back(&s);
}
}
for (int n=0; n<n_particle; n++){
int dice = std::rand()%s_list.size();
const scatterer* s = s_list[dice];
arma::vec pos = s->pos();
p_list.push_back(particle(pos,s,_particle_velocity));
}
}
std::cout << "...done!!!" << std::endl;
return p_list;
}
// persist the parsed json input next to the results so every run records the
// exact configuration it was started with.
void save_json_properties() {
  std::ofstream json_file(_output_directory.path() / "input.json", std::ios::out);
  json_file << std::setw(4) << _json_prop << std::endl;
  json_file.close();
};
// compute the axis-aligned bounding box of all scatterer positions; this box
// is used as the simulation domain.
domain_t find_simulation_domain() const {
  arma::vec lo = _all_scat_list.front().pos();
  arma::vec hi = _all_scat_list.front().pos();

  for (const auto& s : _all_scat_list) {
    for (int d = 0; d < 3; ++d) {
      if (s.pos(d) < lo(d)) lo(d) = s.pos(d);
      if (s.pos(d) > hi(d)) hi(d) = s.pos(d);
    }
  }

  return {lo, hi};
};
// advance every particle by one time step of length dt and accumulate the
// elapsed simulation time; particles are independent within a step, so the
// loop is parallelized over particles.
void step(double dt) {
  #pragma omp parallel for
  for (unsigned idx = 0; idx < _particle_list.size(); ++idx) {
    _particle_list[idx].step(dt, _all_scat_list, _max_hopping_radius);
  }

  _time += dt;
};
// high level method to calculate proper scattering table
std::vector<std::vector<scattering_struct>> create_scattering_table(nlohmann::json j);
// method to calculate scattering rate via forster method
scattering_struct create_forster_scatt_table(double gamma_0, double r_0);
// method to calculate scattering rate via davoody et al. method
scattering_struct create_davoody_scatt_table(const cnt& d_cnt, const cnt& a_cnt);
// divide scatterers into buckets based on their location, and set the pointers to enclosing and neighboring buckets
// for each scatterer object
void create_scatterer_buckets(const domain_t domain, const double radius, std::vector<scatterer>& scat_list,
bucket_t& scat_buckets, std::vector<scatterer>& q_list, bucket_t& q_buckets) {
using namespace std;
std::cout << "\n"
<< "finding scatterer buckets: ";
double xmin = (domain.first)(0);
double xmax = (domain.second)(0);
int nx = std::ceil((xmax - xmin) / radius) + 1;
double ymin = (domain.first)(1);
double ymax = (domain.second)(1);
int ny = std::ceil((ymax - ymin) / radius) + 1;
double zmin = (domain.first)(2);
double zmax = (domain.second)(2);
int nz = std::ceil((zmax - zmin) / radius) + 1;
scat_buckets.resize(nx*ny*nz);
q_buckets.resize(nx*ny*nz);
for (scatterer& s : scat_list) {
int ix = (s.pos(0) - xmin) / radius;
int iy = (s.pos(1) - ymin) / radius;
int iz = (s.pos(2) - zmin) / radius;
int idx = ix + iy * nx + iz * nx * ny;
scat_buckets[idx].push_back(&s);
}
for (scatterer& s : q_list) {
int ix = (s.pos(0) - xmin) / radius;
int iy = (s.pos(1) - ymin) / radius;
int iz = (s.pos(2) - zmin) / radius;
int idx = ix + iy * nx + iz * nx * ny;
q_buckets[idx].push_back(&s);
}
for (scatterer& s : scat_list) {
int ix = (s.pos(0) - xmin) / radius;
int iy = (s.pos(1) - ymin) / radius;
int iz = (s.pos(2) - zmin) / radius;
for (int i : {ix - 1, ix, ix + 1}) {
for (int j : {iy - 1, iy, iy + 1}) {
for (int k : {iz - 1, iz, iz + 1}) {
if (i > -1 && i < nx && j > -1 && j < ny && k > -1 && k < nz) {
unsigned idx = i + j * nx + k * nx * ny;
s.close_scats.push_back(&(scat_buckets[idx]));
s.close_quenches.push_back(&(q_buckets[idx]));
}
}
}
}
}
std::cout << "done!\n";
}
// set the pointer to scattering table struct for all scatterer objects
void set_scat_table(const scattering_struct& scat_tab, std::vector<scatterer>& scat_list) {
for (auto& s : scat_list) {
s.scat_tab = &scat_tab;
}
}
// precompute the maximum scattering rate of every scatterer within the given
// hopping radius. the computation is parallelized over scatterers; the
// progress bar update sits in a critical section since it mutates shared
// state from multiple threads.
void set_max_rate(const double max_hopping_radius, std::vector<scatterer>& scat_list){
progress_bar prog(scat_list.size(), "setting max rate in scatterers");
# pragma omp parallel
{
# pragma omp for
for (unsigned i=0; i<scat_list.size(); ++i) {
scat_list[i].set_max_rate(max_hopping_radius);
#pragma omp critical
prog.step();
}
}
}
// refill the two contact regions (the first and last y-segments) with their
// nominal particle populations.
void repopulate_contacts() {
  const double ymin = _domain.first(1);
  const double ymax = _domain.second(1);
  const double dy = (ymax - ymin) / double(_n_seg);

  // contact 1 occupies the first segment
  repopulate(ymin, ymin + dy, _c1_pop, _c1_scat, _particle_list);

  // contact 2 occupies the last segment
  repopulate(ymin + double(_n_seg - 1) * dy, ymax, _c2_pop, _c2_scat, _particle_list);
};
// recycle all particles whose y coordinate lies in [ymin, ymax] and
// re-populate that region with exactly n_particle particles placed on random
// scatterers from s_list. recycled slots are overwritten in place before any
// new elements are appended, and the list is finally shrunk to its exact new
// size.
// robustness fix: assert that s_list is non-empty when particles are
// requested; the original evaluated rand() % s_list.size() unconditionally,
// which is a division by zero for an empty contact.
void repopulate(const double ymin, const double ymax, const unsigned n_particle,
                const std::vector<const scatterer*>& s_list,
                std::vector<particle>& p_list) {
  // partition: particles to keep end up in [0, j), recycled ones in [j, end)
  unsigned j = p_list.size();
  for (unsigned i = 0; i < j;) {
    if (p_list[i].pos(1) >= ymin && p_list[i].pos(1) <= ymax) {
      --j;
      std::swap(p_list[i], p_list[j]);
    } else {
      ++i;
    }
  }

  assert(!s_list.empty() || n_particle == 0);

  int dice = 0;
  unsigned n = 0;
  unsigned final_size = j + n_particle;

  // first overwrite the recycled slots [j, j_lim) ...
  unsigned j_lim = std::min(int(p_list.size()), int(final_size));
  for (; j < j_lim; ++j) {
    dice = std::rand() % s_list.size();
    p_list[j] = particle(s_list[dice]->pos(), s_list[dice], _particle_velocity);
    ++n;
  }
  // ... then append whatever is still missing ...
  for (; n < n_particle; ++n) {
    dice = std::rand() % s_list.size();
    p_list.emplace_back(particle(s_list[dice]->pos(), s_list[dice], _particle_velocity));
  }
  // ... and drop leftover slots if the region shrank
  p_list.resize(final_size);
}
// create a list of scatterer pointers in the contact number i
std::vector<const scatterer*> contact_scats(const std::vector<scatterer>& s_list, const unsigned n_seg, const unsigned i,
const domain_t& domain) {
assert(i>0);
assert(i<=n_seg);
double ymin = domain.first(1);
double ymax = domain.second(1);
double dy = (ymax-ymin)/double(n_seg);
double y1 = ymin + double(i - 1) * dy;
double y2 = ymin + double(i) * dy;
std::vector<const scatterer*> c_list;
for (auto& s: s_list){
if (s.pos(1)>=y1 && s.pos(1)<=y2){
c_list.push_back(&s);
}
}
return c_list;
}
// calculate all the metrics needed from the experiment: appends the current
// per-segment population profile and the interface currents (computed over
// the last time step dt) to their respective output files.
void save_metrics(double dt) {
save_population_profile(_n_seg, dt);
save_currents(_n_seg, dt);
}
// calculate and save the population density profile along y. the first call
// writes a header block (segment areas, widths, centers and column labels);
// every call appends one line: the current time followed by the particle
// density (count / (area * dy)) of each of the n segments.
// perf fix: particles are now iterated by reference; the original
// `for (auto p : _particle_list)` copied every particle on every call.
void save_population_profile(unsigned n, double dt) {
  assert(_area.size() == n);

  std::vector<int> pop(n, 0);
  double ymax = (_domain.second)(1);
  double ymin = (_domain.first)(1);
  double dy = (ymax - ymin) / double(n);

  // first call: open the file and write the header block
  if (!_pop_file.is_open()) {
    _pop_file.open(_output_directory.path() / "population_profile.dat", std::ios::out);

    _pop_file << "area";
    for (unsigned i = 0; i < _area.size(); ++i) {
      _pop_file << "," << std::scientific << std::showpos << _area[i];
    }
    _pop_file << std::endl
              << std::endl;

    _pop_file << "dy";
    for (unsigned i = 0; i < _area.size(); ++i) {
      _pop_file << "," << std::scientific << std::showpos << dy;
    }
    _pop_file << std::endl
              << std::endl;

    _pop_file << "section pos";
    for (unsigned i = 0; i < _area.size(); ++i) {
      _pop_file << "," << std::scientific << std::showpos << ymin + (double(i) + 0.5) * dy;
    }
    _pop_file << std::endl
              << std::endl;

    _pop_file << "time";
    for (unsigned i = 0; i < _area.size(); ++i) {
      _pop_file << ",section" << i;
    }
    _pop_file << std::endl;
  }

  // histogram particles into segments, clamping out-of-range indices
  int i = 0;
  for (auto& p : _particle_list) {
    i = (p.pos(1) - ymin) / dy;
    i = i < 0 ? 0 : (i < int(n) ? i : int(n) - 1);
    pop[i]++;
  }

  _pop_file << std::showpos << std::scientific << _time;
  for (unsigned j = 0; j < pop.size(); ++j) {
    _pop_file << "," << double(pop[j]) / (_area[j] * dy);
  }
  _pop_file << std::endl;
}
// calculate and save the net particle current through each of the n-1
// internal segment interfaces. the first call writes a header (interface
// areas and positions); every call appends one line: the current time
// followed by the current density through each interface, from the signed
// count of particles that crossed it during the last step of length dt.
// fixes: the copied header comment said "population profile"; particles are
// iterated by reference (the original copied every particle once per
// interface inside the nested loop).
void save_currents(int n, double dt) {
  assert(int(_area.size()) == n);

  double ymax = _domain.second(1);
  double ymin = _domain.first(1);
  double dy = (ymax - ymin) / double(n);

  // positions of the internal interfaces and their (averaged) cross sections
  std::vector<double> y(n - 1, 0);
  std::vector<double> area_at_interface(n - 1, 0);
  for (int i = 1; i < n; ++i) {
    y[i - 1] = ymin + dy * double(i);
    area_at_interface[i - 1] = (_area[i - 1] + _area[i]) / 2;
  }

  // first call: open the file and write the header block
  if (!_curr_file.is_open()) {
    _curr_file.open(_output_directory.path() / "region_current.dat", std::ios::out);

    _curr_file << "interface area";
    for (const auto& a : area_at_interface) {
      _curr_file << std::showpos << std::scientific << "," << a;
    }
    _curr_file << std::endl
               << std::endl;

    _curr_file << "interface pos";
    for (const auto& yy : y) {
      _curr_file << std::showpos << std::scientific << "," << yy;
    }
    _curr_file << std::endl
               << std::endl;

    _curr_file << "time";
    for (int i = 1; i < n; ++i) {
      _curr_file << ",interface" << (i - 1);
    }
    _curr_file << std::endl;
  }

  // signed crossing count: +1 upward, -1 downward across each interface
  std::vector<int> curr(n - 1, 0);
  for (unsigned i = 0; i < y.size(); ++i) {
    for (auto& p : _particle_list) {
      if (p.old_pos(1) < y[i] && p.pos(1) >= y[i]) {
        curr[i]++;
      } else if (p.old_pos(1) >= y[i] && p.pos(1) < y[i]) {
        curr[i]--;
      }
    }
  }

  _curr_file << std::showpos << std::scientific << _time;
  for (unsigned i = 0; i < curr.size(); ++i) {
    _curr_file << std::showpos << std::scientific << "," << double(curr[i]) / (area_at_interface[i] * dt);
  }
  _curr_file << std::endl;
}
// find the bounding-box cross-section area of the structure in each of n_seg
// segments along the y-axis.
// bug fix: the original used `else if` between the min and max updates, so a
// point that should extend both bounds -- always the case for the first point
// seen in a segment, because the mins start at the domain maximum and the
// maxes at the domain minimum -- only updated the min and could leave the max
// stale, underestimating the area. the four updates are now independent.
std::vector<double> get_area(unsigned n_seg) {
  assert(n_seg > 0);

  double ymax = (_domain.second)(1);
  double ymin = (_domain.first)(1);
  double dy = (ymax - ymin) / double(n_seg);

  // mins start at the largest and maxes at the smallest possible value so
  // the first scatterer in a segment initializes both bounds
  std::vector<double> xmax(n_seg, _domain.first(0));
  std::vector<double> xmin(n_seg, _domain.second(0));
  std::vector<double> zmax(n_seg, _domain.first(2));
  std::vector<double> zmin(n_seg, _domain.second(2));

  for (auto& s : _all_scat_list) {
    int i = (s.pos(1) - ymin) / dy;
    i = i < 0 ? 0 : (i < int(n_seg) ? i : n_seg - 1); // clamp i to [0, n_seg-1]
    if (xmin[i] > s.pos(0)) xmin[i] = s.pos(0);
    if (xmax[i] < s.pos(0)) xmax[i] = s.pos(0);
    if (zmin[i] > s.pos(2)) zmin[i] = s.pos(2);
    if (zmax[i] < s.pos(2)) zmax[i] = s.pos(2);
  }

  std::vector<double> area(n_seg, 0);
  for (unsigned i = 0; i < n_seg; ++i) {
    area[i] = (zmax[i] - zmin[i]) * (xmax[i] - xmin[i]);
  }
  return area;
}
// calculate and save statistics about the scatterer distribution along y:
// each row of scatterer_statistics.dat holds the segment center position,
// the fraction of all scatterers in that segment, the raw count, and the
// volume density (count / (area * dy)).
// consistency fix: the original asserted on the n_seg parameter but then used
// the _n_seg member everywhere; the parameter is now honored throughout.
void get_scatterer_statistics(const unsigned n_seg, const std::vector<double>& area) {
  assert(n_seg > 0);
  assert(n_seg == area.size());

  double ymin = _domain.first(1);
  double ymax = _domain.second(1);
  double dy = (ymax - ymin) / double(n_seg);

  // histogram scatterers into segments
  // NOTE(review): the modulo wraps out-of-range indices instead of clamping
  // them -- kept as-is, but confirm this is the intended binning
  std::vector<long> pop(n_seg, 0);
  for (auto& s : _all_scat_list) {
    int i = int(std::abs(s.pos(1) - ymin) / dy) % n_seg;
    pop[i]++;
  }

  // segment center positions
  std::vector<double> pos(n_seg, 0);
  for (unsigned i = 0; i < pos.size(); ++i) {
    pos[i] = ymin + (double(i) + 0.5) * dy;
  }

  std::fstream f;
  f.open(_output_directory.path() / "scatterer_statistics.dat", std::ios::out);
  f << "position,distribution,population,density\n";
  for (unsigned i = 0; i < pop.size(); ++i) {
    f << std::scientific << pos[i] << "," << double(pop[i]) / double(_all_scat_list.size()) << "," << pop[i] << ","
      << double(pop[i]) / (area[i] * dy) << "\n";
  }
  f.close();
}
// trim all the scatterer objects outside a particular region: scatterers
// outside the box [xlim] x [ylim] x [zlim] are swapped to the tail of s_list
// and the vector is shrunk; left/right neighbor indices are patched so
// surviving scatterers never reference removed ones.
void trim_scats(const limit_t xlim, const limit_t ylim, const limit_t zlim,
std::vector<scatterer>& s_list) {
std::cout << std::endl
<< "triming scattering list..."
<< std::flush;
// swap two scatterer objects in scatterer_list and update the index of right and left scatterer objects
auto swap_scatterers = [&s_list] (int i, int j){
int iLeft = s_list[i].left;
int iRight = s_list[i].right;
int jLeft = s_list[j].left;
int jRight = s_list[j].right;
int new_i = j;
int new_j = i;
// retarget both elements' neighbors to the positions they are about to occupy
if (iLeft > -1) s_list[iLeft].right = new_i;
if (iRight > -1) s_list[iRight].left = new_i;
if (jLeft > -1) s_list[jLeft].right = new_j;
if (jRight > -1) s_list[jRight].left = new_j;
scatterer t = s_list[i];
s_list[i] = s_list[j];
s_list[j] = t;
};
// partition: surviving scatterers stay in [0, j), trimmed ones go to [j, end)
int j = s_list.size();
for (int i=0; i<j; ){
if (s_list[i].pos(0) < xlim.first || s_list[i].pos(1) < ylim.first || s_list[i].pos(2) < zlim.first ||
s_list[i].pos(0) > xlim.second || s_list[i].pos(1) > ylim.second || s_list[i].pos(2) > zlim.second) {
--j;
swap_scatterers(i,j);
// delete the links to scatterer objects at location j.
if (s_list[j].left > -1) s_list[s_list[j].left].right = -1;
if (s_list[j].right > -1) s_list[s_list[j].right].left = -1;
} else {
++i;
}
}
s_list.resize(j);
s_list.shrink_to_fit();
// count scatterers that lost both neighbors
// NOTE(review): this count is computed but never reported or returned --
// either print it or drop the loop
unsigned count(0);
for (unsigned i=0; i<s_list.size(); ++i){
if (s_list[i].left == -1 && s_list[i].right == -1)
count++;
}
std::cout << "...done!"
<< std::endl;
}
// inject a single particle into the left contact, step it until it enters
// the last (right-contact) segment, and record its position after every step
// in particle_path.<fileNo>.dat
void track_particle(double dt, int fileNo) {
  double ymin = _domain.first(1);
  double ymax = _domain.second(1);
  double dy = (ymax - ymin) / double(_n_seg);

  // place exactly one particle in the first segment
  std::vector<particle> p_list;
  repopulate(ymin, ymin + dy, 1, _c1_scat, p_list);

  // build the per-track output file name
  std::string base = _output_directory.path() / "particle_path.";
  std::stringstream filename;
  filename << base << fileNo << ".dat";
  std::ofstream file(filename.str().c_str(), std::ios::out);
  file << std::scientific << std::showpos << std::scientific;

  // the particle is considered "arrived" once it passes the start of the
  // last segment
  double y_exit = ymin + double(_n_seg - 1) * dy;
  while (p_list.front().pos(1) < y_exit) {
    p_list.front().step(dt, _all_scat_list, _max_hopping_radius);
    file << " " << p_list.front().pos(0) << " " << p_list.front().pos(1) << " " << p_list.front().pos(2) << "\n";
  }

  file << std::endl;
  file.close();
}
// This method calculates the mean square displacement of num_pop particles in the domain at each time step.
// It uses the same methodology as track_particle: all particles start in the left contact and are tracked until they reach the right contact.
// It outputs a file called mean_square_displacement.dat in the output folder which records the mean square displacement for every time step.
// parameters: dt - time step in seconds (used in the step function)
// num_pop - number of particles added initially to the domain
void calc_diffusion(double dt, int num_pop) {
// assert(_particle_list.empty() && "particle list is not empty!");
// inject num_pop particles into the first (left-contact) segment
double ymin = _domain.first(1);
double ymax = _domain.second(1);
double dy = (ymax - ymin) / double(_n_seg);
double y1 = ymin;
double y2 = ymin + dy;
std::vector<particle> p_list;
repopulate(y1, y2, num_pop, _c1_scat, p_list);
// remember each particle's starting coordinates for the displacement calculation
std::vector<double> orig_pos0;
std::vector<double> orig_pos1;
std::vector<double> orig_pos2;
for (unsigned i = 0; i < p_list.size();i++) {
orig_pos0.emplace_back(p_list[i].pos(0));
orig_pos1.emplace_back(p_list[i].pos(1));
orig_pos2.emplace_back(p_list[i].pos(2));
}
std::stringstream filename;
std::string base = _output_directory.path() / "mean_square_displacement.dat";
filename << base;
std::ofstream file(filename.str().c_str(), std::ios::out);
file << std::scientific << std::showpos << std::scientific;
// particles are absorbed once they pass the start of the last segment
y1 = ymin + double(_n_seg - 1) * dy;
y2 = ymax;
unsigned num_left = p_list.size();
std::cout << num_left;
// counts dt-sized steps taken, not physical time
unsigned time = 0;
while (num_left > 0) {
double total_square_displace = 0;
for (unsigned i = 0; i < num_left;) {
if (p_list[i].pos(1) >= y1) {
// particle reached the right contact: drop it together with its origin
/* std::swap(p_list[i], p_list[num_left-1]);
std::swap(orig_pos0[i], orig_pos0[num_left - 1]);
std::swap(orig_pos1[i], orig_pos1[num_left - 1]);
std::swap(orig_pos2[i], orig_pos2[num_left - 1]);*/
p_list.erase(p_list.begin()+i);
orig_pos0.erase(orig_pos0.begin()+i);
orig_pos1.erase(orig_pos1.begin()+i);
orig_pos2.erase(orig_pos2.begin()+i);
num_left=p_list.size();
}
else {
// advance the particle and accumulate its squared displacement from its origin
p_list[i].step(dt, _all_scat_list, _max_hopping_radius);
double square_displace = (p_list[i].pos(0) - orig_pos0[i]) * (p_list[i].pos(0) - orig_pos0[i]) +
(p_list[i].pos(1) - orig_pos1[i]) * (p_list[i].pos(1) - orig_pos1[i]) +
(p_list[i].pos(2) - orig_pos2[i]) * (p_list[i].pos(2) - orig_pos2[i]);
total_square_displace = total_square_displace + square_displace;
i++;
}
}
time++;
std::cout << "\r" <<"Number of particle left: " <<num_left<<" Simulation time: "<< time;
// NOTE(review): if every remaining particle is absorbed in the same
// iteration, num_left is 0 here and 0.0/0 (NaN) is written -- confirm
file << " " << double(total_square_displace/num_left) << " \n";
}
file << std::endl;
file.close();
std::cout << std::endl;
}
/*unsigned j = p_list.size();
for (unsigned i = 0; i < j;) {
if (p_list[i].pos(1) >= ymin && p_list[i].pos(1) <= ymax) {
--j;
std::swap(p_list[i], p_list[j]);
}
else {
++i;
}
}
*/
// initialize the simulation condition to calculate diffusion coefficient using green-kubo approach
void kubo_init();
// slice the domain into n sections in each direction, and return a list of scatterers in the center region as the injection region
std::vector<const scatterer *> injection_region(const std::vector<scatterer>& all_scat, const domain_t domain, const int n);
// slice the domain into n sections in each direction, and return the domain that leaves only 1 section from each side
domain_t get_removal_domain(const domain_t domain, const int n);
// create particles for kubo simulation
void kubo_create_particles();
// get maximum time for kubo simulation
const double& kubo_max_time() const {return _max_time;};
// step the kubo simulation in time
void kubo_step(double dt);
// save the displacement of individual particles in kubo simulation
void kubo_save_individual_particle_dispalcements();
// save the average displacement of particles in kubo simulation
void kubo_save_avg_dispalcement_squared();
void kubo_save_diffusion_tensor();
void kubo_save_diffusion_length();
}; // end class monte_carlo
} // end namespace mc
#endif // monte_carlo_h |
openmp_tournament.c | /*
* TOURNAMENT BARRIER: OPENMP
* To show correct functionality of barrier: Uncomment line 142
* To compile: gcc -o openmp_tournament openmp_tournament.c -lm -fopenmp
* To run: ./openmp_tournament [num_threads num_barriers]
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#include <stdbool.h>
#define WINNER 0
#define LOSER 1
#define BYE 2
#define CHAMPION 3
#define DROPOUT 4
typedef struct record{
int role;
int opponent;
int flag;
}record_t;
record_t** players;
int P, N;
// Allocate and initialize the per-thread tournament records.
// players[i][k] describes thread i's part in round k of the tournament:
// round 0 is the DROPOUT level that terminates the wakeup walk, and in round
// k > 0 a thread is paired with the thread whose id differs by 2^(k-1).
// NOTE(review): malloc/calloc results are not checked for NULL.
void barrier_init()
{
int i, k;
// number of rounds needed for P players
int rounds = (ceil(log(P)/log(2)));
// one record per thread per round (rounds+1 levels including round 0)
players = (record_t**)malloc(P * sizeof(record_t*));
for(i = 0; i < P; i++)
{
players[i] = (record_t*)calloc(rounds+1, sizeof(record_t));
}
for(i = 0; i < P; i++)
{
for(k=0;k<=rounds;k++)
{
//Initializing
players[i][k].role = -1;
players[i][k].opponent = -1;
players[i][k].flag = 0;
//Initializing role and opponent
if (k==0){
// round 0: every thread drops out of the wakeup walk here
players[i][k].role = DROPOUT;
}
else if (k>0){
if (i == 0 && (1<<k)>=P){
// thread 0 wins the whole tournament in the final round
players[i][k].role = CHAMPION;
players[i][k].opponent = i+(1<<(k-1));
}
else if (i%(1<<k) == 0){
if (((i + (1<<(k-1))) < P) && ((1<<k) < P)){
// waits for its opponent, then advances to the next round
players[i][k].role = WINNER;
players[i][k].opponent = i+(1<<(k-1));
}
else if ((i + (1<<(k-1))) >= P){
// no opponent exists in this round (P not a power of two)
players[i][k].role = BYE;
}
}
else if ((i%(1<<k)) == (1<<(k-1))){
// signals its opponent and waits to be woken up
players[i][k].role = LOSER;
players[i][k].opponent = i-(1<<(k-1));
}
}
}
}
}
// One tournament barrier episode for the calling thread.
// Arrival: losers signal their opponent and spin until woken; winners spin
// for their opponent's arrival and move up a round; the champion completes
// the barrier and starts the wakeup wave. Wakeup: winners walk back down the
// rounds releasing their opponents until their DROPOUT level (round 0).
// pid_sense is the caller's private sense flag and is inverted on exit so
// the same records can be reused for the next barrier episode.
// NOTE(review): the flag fields are plain ints spun on without volatile or
// atomics -- the compiler is free to cache them; works with this OpenMP
// setup in practice, but it is not a portable synchronization idiom.
void tournament_barrier(int *pid_sense)
{
int rounds = (ceil(log(P)/log(2)));
int round = 1;
int vpid = omp_get_thread_num();
int k;
//Arrival tree
while(1)
{
if (players[vpid][round].role == LOSER){
// tell the winner we arrived, then wait for the wakeup wave
players[players[vpid][round].opponent][round].flag = *pid_sense;
while (players[vpid][round].flag != *pid_sense);
break;
}
else if (players[vpid][round].role == WINNER){
// wait for the loser's arrival, then continue to the next round
while (players[vpid][round].flag != *pid_sense);
}
else if (players[vpid][round].role == CHAMPION){
// last round: everyone has arrived; kick off the wakeup wave
while (players[vpid][round].flag != *pid_sense);
players[players[vpid][round].opponent][round].flag = *pid_sense;
break;
}
round += 1;
if (round > rounds)
break;
}
//Wakeup tree
while(1)
{
if (round == -1)
break;
round -= 1;
if (players[vpid][round].role == WINNER){
// release the loser we defeated at this round
players[players[vpid][round].opponent][round].flag = *pid_sense;
}
else if (players[vpid][round].role == DROPOUT)
break;
}
// flip the private sense for the next barrier episode
*pid_sense = !*pid_sense;
}
/*
 * Entry point: times N loops of 5 tournament barriers across P OpenMP
 * threads and prints the total and average barrier cost.
 * Bug fix: the original freed only the table of row pointers and leaked the
 * P per-thread record arrays allocated in barrier_init.
 */
int main(int argc, char* argv[])
{
    int i;

    /* optional arguments: [num_threads num_barriers] */
    if (argc == 3){
        if (sscanf (argv[1], "%d", &P)!=1)
            printf ("P - not an integer\n");
        if (sscanf (argv[2], "%d", &N)!=1)
            printf ("N - not an integer\n");
    }
    else{
        //Number of processors
        P = 4;
        //Number of loops
        N = 5;
    }

    struct timeval tv1, tv2;
    double total_time;
    /* per-thread sense flag; firstprivate below gives each thread its own copy */
    int pid_sense = 1;

    barrier_init();
    omp_set_num_threads(P);

    gettimeofday(&tv1, NULL);
    #pragma omp parallel shared (players,N) firstprivate(pid_sense)
    {
        int j;
        for(j=0;j<N;j++)
        {
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
            tournament_barrier(&pid_sense);
        }
    }
    gettimeofday(&tv2, NULL);

    /* elapsed wall time in microseconds */
    total_time = (double) (tv2.tv_usec - tv1.tv_usec) + (double) (tv2.tv_sec - tv1.tv_sec)*1000000;
    printf("\nSUMMARY:\nTotal run-time for %d "
            "loops with 5 barriers per loop: %fs\n"
            "The average time per barrier: %fus\n",
            N, total_time/1000000, (double)(total_time/(N*5)));

    /* free the per-thread record arrays before the table itself */
    for (i = 0; i < P; i++)
        free(players[i]);
    free(players);

    return 0;
}
|
reduction_plus_2.c | // PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
// entry point: demonstrates OpenMP '+' reduction semantics.
// each thread gets a private 'result' initialized to the '+' identity (0);
// after the region the private copies are combined with the original value,
// so the printed result is 100 plus the sum of all thread ranks.
int main()
{
int result = 100;
#pragma omp parallel reduction(+:result)
{
int rank = omp_get_thread_num();
result += rank;
}
printf("Result: %d\n", result);
}
|
GB_unop__log10_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log10_fc64_fc64)
// op(A') function: GB (_unop_tran__log10_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog10 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog10 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog10 (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG10 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// apply the unary operator entrywise: Cx [p] = GB_clog10 (Ax [p]) for all
// anz entries, parallelized over nthreads. when A is bitmap (Ab != NULL)
// only entries with Ab [p] set are computed.
// (this file is auto-generated -- see the header note before editing.)
GrB_Info GB (_unop_apply__log10_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_clog10 (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_clog10 (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = log10 (A'): transpose A and apply the unary operator; the shared
// transpose kernel is pulled in via GB_unop_transpose.c and driven by the
// GB_* macros defined above.
// (this file is auto-generated -- see the header note before editing.)
GrB_Info GB (_unop_tran__log10_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ImageGrises.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
/*
 * Converts a 24-bit BMP image ("sample.bmp") to grayscale using the
 * luminosity method (0.21 R + 0.72 G + 0.07 B) and writes the result
 * to "img2_dd.bmp".
 *
 * Fixes relative to the previous version:
 *  - the stray "i++" inside the pixel loop double-incremented the
 *    counter and skipped every second pixel;
 *  - "#pragma omp for" was outside any parallel region (a no-op), and
 *    the loop cannot be parallelized anyway: each iteration depends on
 *    the file position advanced by the previous fgetc() calls;
 *  - fopen() results are now checked.
 */
int main()
{
    FILE *image, *outputImage;
    image = fopen("sample.bmp", "rb");        /* original image */
    outputImage = fopen("img2_dd.bmp", "wb"); /* grayscale output */
    if (image == NULL || outputImage == NULL) {
        fprintf(stderr, "Could not open input or output file\n");
        return 1;
    }
    unsigned char r, g, b, pixel;
    /* Copy the 54-byte BMP header verbatim. */
    for (int i = 0; i < 54; i++)
        fputc(fgetc(image), outputImage);
    /* Pixels are stored as B, G, R triplets; write the same gray value
       three times to keep the 24-bit layout. */
    for (int i = 0; i < 927361; i++) {
        b = fgetc(image);
        g = fgetc(image);
        r = fgetc(image);
        pixel = 0.21 * r + 0.72 * g + 0.07 * b;
        fputc(pixel, outputImage);
        fputc(pixel, outputImage);
        fputc(pixel, outputImage);
    }
    fclose(image);
    fclose(outputImage);
    return 0;
}
|
fused_rowwise_nbit_conversion_ops.h | #pragma once
#include <algorithm>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
// for param_search_greedy
#include "caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h"
#include "caffe2/perfkernels/fused_nbit_rowwise_conversion.h"
namespace caffe2 {
template <
int BIT_RATE,
typename T,
void (*convert)(float* dst, const T* src, size_t N),
bool GREEDY = false>
// Row-wise quantization to BIT_RATE-bit values: each output row stores the
// packed quantized data followed by an fp16 scale and an fp16 bias (Xmin).
// With GREEDY, param_search_greedy refines the [Xmin, Xmax] range per row
// before quantizing.  `convert` turns a row of T into float for processing.
class FloatToFusedNBitRowwiseQuantizedOp final : public Operator<CPUContext> {
public:
FloatToFusedNBitRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FloatToFusedNBitRowwiseQuantizedOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FLOAT);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
// All leading dimensions are folded into "rows"; the last one is "columns".
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// NOTE(review): dim(i) on the legacy caffe2 tensor returns the size of
// dimension i, so this checks input_columns % NUM_ELEM_PER_BYTE — confirm.
CAFFE_ENFORCE_EQ(
input.dim(input.dim() - 1) % NUM_ELEM_PER_BYTE,
0,
"FloatToFused" + caffe2::to_string(BIT_RATE) +
"BitRowwiseQuantizedOp only works for the number of "
"columns a multiple of " +
caffe2::to_string(NUM_ELEM_PER_BYTE));
// The "fused" representation stores the scale and bias with the
// row-wise quantized data in one tensor.
// Since we represent the scale and bias in 16-bit float, we'll use the
// last 4 bytes of each row for scale (2 bytes) and bias (2 bytes).
// | ... quantized data ... | scale | bias |
// | number_of_columns | 2B | 2B |
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] = static_cast<std::int64_t>(
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
2 * sizeof(at::Half));
auto* output = Output(
DATA_FUSED_SCALE_BIAS, output_dimensions, at::dtype<std::uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<std::uint8_t>();
if (!GREEDY && std::is_same<T, float>::value) {
// fast path: delegate the whole tensor to the perfkernel implementation
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FloatToFusedNBitRowwiseQuantizedSBHalf(
BIT_RATE,
reinterpret_cast<const float*>(input_data),
input_rows,
input_columns,
output_data);
} else {
const auto output_columns = output->size(output->dim() - 1);
// tmp_vec holds one row-buffer per thread only in GREEDY mode — the
// only mode in which the loop below actually runs in parallel.
#ifdef _OPENMP
vector<float> tmp_vec(
input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
vector<float> tmp_vec(input_columns);
#endif
#pragma omp parallel for if (GREEDY)
for (int row = 0; row < input_rows; ++row) {
float* tmp = tmp_vec.data();
#ifdef _OPENMP
if (GREEDY) {
tmp = &tmp_vec[omp_get_thread_num() * input_columns];
}
#endif
// Convert the row to float into tmp, then quantize from tmp.
convert(tmp, input_data + row * input_columns, input_columns);
std::uint8_t* output_row = output_data + row * output_columns;
at::Half* output_row_scale = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
at::Half* output_row_bias = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
float Xmin = *std::min_element(tmp, tmp + input_columns);
float Xmax = *std::max_element(tmp, tmp + input_columns);
if (GREEDY) {
internal::param_search_greedy(
tmp, input_columns, 200, 0.16, Xmin, Xmax, BIT_RATE);
}
// Round Xmin to fp16 to match with dequantization that will use fp16
// for Xmin.
Xmin = static_cast<at::Half>(Xmin);
const float range = Xmax - Xmin;
// Round scale to fp16 to match with dequantization that will use fp16
// for scale.
// Set scale to 1.0f for the corner case of Xmax == Xmin .
// Any non-zero scale would work because during quantization
// (X - Xmin) / scale will be 0 for all X unless scale is 0.
at::Half scale = range == 0 ? 1.0f : range / ((1 << BIT_RATE) - 1);
float inverse_scale = scale == 0 ? 1.0f : 1.0f / scale;
if (scale == 0 || std::isinf(inverse_scale)) {
// Corner case handling when Xmax == Xmin
// Any scale would work because X - Xmin will be 0 for all X
scale = 1.0f;
inverse_scale = 1.0f;
}
*output_row_scale = scale;
*output_row_bias = Xmin;
// Quantize each value and pack NUM_ELEM_PER_BYTE of them per byte,
// least-significant bits first.
for (int col = 0; col < input_columns; ++col) {
float X = tmp[col];
std::uint8_t quantized = std::max(
0,
std::min<int>(
std::lrintf((X - Xmin) * inverse_scale),
(1 << BIT_RATE) - 1));
if (col % NUM_ELEM_PER_BYTE == 0) {
output_row[col / NUM_ELEM_PER_BYTE] = quantized;
} else {
output_row[col / NUM_ELEM_PER_BYTE] |=
(quantized << ((col % NUM_ELEM_PER_BYTE) * BIT_RATE));
}
}
}
} // GREEDY || !std::is_same<T, float>::value
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS);
};
template <
int BIT_RATE,
typename T,
void (*convert)(T* dst, const float* src, size_t N)>
// Inverse of FloatToFusedNBitRowwiseQuantizedOp: unpacks the BIT_RATE-bit
// values of each row and dequantizes them as x = scale * q + bias, where
// scale and bias are the two fp16 values stored at the end of the row.
// `convert` turns the dequantized float row into T.
class FusedNBitRowwiseQuantizedToFloatOp final : public Operator<CPUContext> {
public:
FusedNBitRowwiseQuantizedToFloatOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FusedNBitRowwiseQuantizedToFloatOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FUSED_SCALE_BIAS);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and bias.
// The rest of input_columns is the number of values in the original row.
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
static_cast<std::int64_t>(input_columns - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE;
auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>());
const auto output_columns = output->size(output->dim() - 1);
const auto* input_data = input.template data<std::uint8_t>();
T* output_data = output->template mutable_data<T>();
if (std::is_same<T, float>::value) {
// fast path: delegate the whole tensor to the perfkernel implementation
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FusedNBitRowwiseQuantizedSBHalfToFloat(
BIT_RATE,
input_data,
input_rows,
input_columns,
reinterpret_cast<float*>(output_data));
} else {
// Generic path: dequantize each row into tmp, then convert to T.
std::vector<float> tmp(output_columns);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (size_t row = 0; row < input_rows; ++row) {
const std::uint8_t* input_row = input_data + row * input_columns;
// scale/bias live right after the packed data, i.e. at byte offset
// (output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE.
float scale = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
float bias = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
for (int col = 0; col < output_columns; ++col) {
std::uint8_t quantized = input_row[col / NUM_ELEM_PER_BYTE];
quantized >>= (col % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
tmp[col] = scale * quantized + bias;
}
convert(output_data + row * output_columns, tmp.data(), output_columns);
}
}
return true;
}
private:
INPUT_TAGS(DATA_FUSED_SCALE_BIAS);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
|
SystemMatrix.h |
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
/****************************************************************************/
/* Paso: SystemMatrix */
/****************************************************************************/
/* Copyrights by ACcESS Australia 2003,2004,2005,2006 */
/* Author: Lutz Gross, l.gross@uq.edu.au */
/****************************************************************************/
#ifndef __PASO_SYSTEMMATRIX_H__
#define __PASO_SYSTEMMATRIX_H__
#include "SparseMatrix.h"
#include "SystemMatrixPattern.h"
#include <escript/AbstractSystemMatrix.h>
namespace paso {
struct Options;
class SystemMatrix;
typedef boost::shared_ptr<SystemMatrix> SystemMatrix_ptr;
typedef boost::shared_ptr<const SystemMatrix> const_SystemMatrix_ptr;
typedef int SystemMatrixType;
/// this class holds a (distributed) stiffness matrix
class SystemMatrix : public escript::AbstractSystemMatrix
{
public:
/// default constructor - throws exception.
SystemMatrix();
SystemMatrix(SystemMatrixType type, SystemMatrixPattern_ptr pattern,
dim_t rowBlockSize, dim_t columnBlockSize,
bool patternIsUnrolled, const escript::FunctionSpace& rowFS,
const escript::FunctionSpace& colFS);
~SystemMatrix();
/// Nullifies rows and columns in the matrix.
/// The rows and columns are marked by positive values in mask_row and
/// mask_col. Values on the main diagonal which are marked to set to
/// zero by both mask_row and mask_col are set to main_diagonal_value.
virtual void nullifyRowsAndCols(escript::Data& mask_row,
escript::Data& mask_col,
double main_diagonal_value);
/// Writes the matrix to a MatrixMarket file. With more than one MPI
/// rank the distributed matrix is merged and written by rank 0.
virtual inline void saveMM(const std::string& filename) const
{
if (mpi_info->size > 1) {
//throw PasoException("SystemMatrix::saveMM: Only single rank supported.");
SparseMatrix_ptr merged(mergeSystemMatrix());
if (mpi_info->rank == 0)
merged->saveMM(filename.c_str());
} else {
mainBlock->saveMM(filename.c_str());
}
}
/// Writes the matrix in Harwell-Boeing format.
/// Only supported for a single MPI rank and CSC-format matrices.
virtual inline void saveHB(const std::string& filename) const
{
if (mpi_info->size > 1) {
throw PasoException("SystemMatrix::saveHB: Only single rank supported.");
} else if (!(type & MATRIX_FORMAT_CSC)) {
throw PasoException("SystemMatrix::saveHB: Only CSC format supported.");
} else {
mainBlock->saveHB_CSC(filename.c_str());
}
}
virtual void resetValues(bool preserveSolverData = false);
/// Nullifies rows in the matrix.
/// The rows are marked by positive values in mask_row. Values on the
/// main diagonal which are marked to set to zero by mask_row are set
/// to main_diagonal_value.
void nullifyRows(double* mask_row, double main_diagonal_value);
void add(dim_t, index_t*, dim_t, dim_t, index_t*, dim_t, double*);
void makeZeroRowSums(double* left_over);
/// copies the col_coupleBlock into row_coupleBlock.
/// WARNING: this method uses mpi_requests of the coupler attached to the
/// matrix. No reordering on the received columns is performed.
/// In practice this means that components in
/// row_coupleBlock->pattern->index and
/// row_coupler->connector->recv->shared
/// are ordered by increasing value.
/// Note that send and receive row_coupler->connectors are swapping roles.
void copyColCoupleBlock();
void copyRemoteCoupleBlock(bool recreatePattern);
void fillWithGlobalCoordinates(double f1);
void print() const;
/// Merges the system matrix which is distributed on several MPI ranks
/// into a complete sparse matrix on rank 0. Used by the Merged Solver.
SparseMatrix_ptr mergeSystemMatrix() const;
void mergeMainAndCouple(index_t** p_ptr, index_t** p_idx, double** p_val) const;
void mergeMainAndCouple_CSR_OFFSET0(index_t** p_ptr, index_t** p_idx, double** p_val) const;
void mergeMainAndCouple_CSR_OFFSET0_Block(index_t** p_ptr, index_t** p_idx, double** p_val) const;
void mergeMainAndCouple_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val) const;
void copyMain_CSC_OFFSET1(index_t** p_ptr, index_t** p_idx, double** p_val);
void extendedRowsForST(dim_t* degree_ST, index_t* offset_ST, index_t* ST);
void applyBalanceInPlace(double* x, bool RHS) const;
void applyBalance(double* x_out, const double* x, bool RHS) const;
void balance();
double getGlobalSize() const;
void setPreconditioner(Options* options);
/// Applies the preconditioner.
/// This method needs to be called within a parallel region.
/// Barrier synchronization is performed before the evaluation to make
/// sure that the input vector is available
void solvePreconditioner(double* x, double* b);
void freePreconditioner();
index_t* borrowMainDiagonalPointer() const;
/// convenience alias for startColCollect()
inline void startCollect(const double* in) const
{
startColCollect(in);
}
/// convenience alias for finishColCollect()
inline double* finishCollect() const
{
return finishColCollect();
}
inline void startColCollect(const double* in) const
{
col_coupler->startCollect(in);
}
inline double* finishColCollect() const
{
return col_coupler->finishCollect();
}
inline void startRowCollect(const double* in)
{
row_coupler->startCollect(in);
}
inline double* finishRowCollect()
{
return row_coupler->finishCollect();
}
/// number of rows of the main block (multiply by row_block_size for
/// the total, see getTotalNumRows)
inline dim_t getNumRows() const
{
return mainBlock->numRows;
}
inline dim_t getNumCols() const
{
return mainBlock->numCols;
}
inline dim_t getTotalNumRows() const
{
return getNumRows() * row_block_size;
}
inline dim_t getTotalNumCols() const
{
return getNumCols() * col_block_size;
}
inline dim_t getRowOverlap() const
{
return row_coupler->getNumOverlapComponents();
}
inline dim_t getColOverlap() const
{
return col_coupler->getNumOverlapComponents();
}
// for CSC matrices rows and columns swap roles, hence the distribution
// used here depends on the storage format
inline dim_t getGlobalNumRows() const
{
if (type & MATRIX_FORMAT_CSC) {
return pattern->input_distribution->getGlobalNumComponents();
}
return pattern->output_distribution->getGlobalNumComponents();
}
inline dim_t getGlobalNumCols() const
{
if (type & MATRIX_FORMAT_CSC) {
return pattern->output_distribution->getGlobalNumComponents();
}
return pattern->input_distribution->getGlobalNumComponents();
}
inline dim_t getGlobalTotalNumRows() const
{
return getGlobalNumRows() * row_block_size;
}
inline dim_t getGlobalTotalNumCols() const
{
return getGlobalNumCols() * col_block_size;
}
/// ratio of stored entries to the size of the full dense matrix
inline double getSparsity() const
{
return getGlobalSize() /
((double)getGlobalTotalNumRows()*getGlobalTotalNumCols());
}
inline dim_t getNumOutput() const
{
return pattern->getNumOutput();
}
inline void copyBlockFromMainDiagonal(double* out) const
{
mainBlock->copyBlockFromMainDiagonal(out);
}
inline void copyBlockToMainDiagonal(const double* in)
{
mainBlock->copyBlockToMainDiagonal(in);
}
inline void copyFromMainDiagonal(double* out) const
{
mainBlock->copyFromMainDiagonal(out);
}
inline void copyToMainDiagonal(const double* in)
{
mainBlock->copyToMainDiagonal(in);
}
/// sets all entries of the main and both coupling blocks to the given
/// value and invalidates any previous balancing
inline void setValues(double value)
{
mainBlock->setValues(value);
col_coupleBlock->setValues(value);
row_coupleBlock->setValues(value);
is_balanced = false;
}
/// computes the sum of each row (main block plus column couple block);
/// only available for CSR with 0-based index offset
inline void rowSum(double* row_sum) const
{
if ((type & MATRIX_FORMAT_CSC) || (type & MATRIX_FORMAT_OFFSET1)) {
throw PasoException("SystemMatrix::rowSum: No normalization "
"available for compressed sparse column or index offset 1.");
} else {
const dim_t nrow = mainBlock->numRows*row_block_size;
#pragma omp parallel for
for (index_t irow=0; irow<nrow; ++irow) {
row_sum[irow]=0.;
}
mainBlock->addRow_CSR_OFFSET0(row_sum);
col_coupleBlock->addRow_CSR_OFFSET0(row_sum);
}
}
void MatrixVector(double alpha, const double* in, double beta,
double* out) const;
void MatrixVector_CSR_OFFSET0(double alpha, const double* in, double beta,
double* out) const;
static SystemMatrix_ptr loadMM_toCSR(const char* filename);
static SystemMatrix_ptr loadMM_toCSC(const char* filename);
static int getSystemMatrixTypeId(int solver, int preconditioner,
int package, bool symmetry,
const escript::JMPI& mpi_info);
SystemMatrixType type;
SystemMatrixPattern_ptr pattern;
dim_t logical_row_block_size;
dim_t logical_col_block_size;
dim_t row_block_size;
dim_t col_block_size;
dim_t block_size;
escript::Distribution_ptr row_distribution;
escript::Distribution_ptr col_distribution;
escript::JMPI mpi_info;
Coupler_ptr<real_t> col_coupler;
Coupler_ptr<real_t> row_coupler;
/// main block
SparseMatrix_ptr mainBlock;
/// coupling to neighbouring processors (row - col)
SparseMatrix_ptr col_coupleBlock;
/// coupling to neighbouring processors (col - row)
SparseMatrix_ptr row_coupleBlock;
/// coupling of rows-cols on neighbouring processors (may not be valid)
SparseMatrix_ptr remote_coupleBlock;
bool is_balanced;
/// matrix may be balanced by a diagonal matrix D=diagonal(balance_vector)
/// if is_balanced is true, the matrix stored is D*A*D where A is the
/// original matrix.
/// When the system of linear equations is solved we solve D*A*D*y=c.
/// So to solve A*x=b one needs to set c=D*b and x=D*y.
double* balance_vector;
/// stores the global ids for all cols in col_coupleBlock
mutable index_t* global_id;
/// package code controlling the solver pointer
mutable index_t solver_package;
/// pointer to data needed by a solver
void* solver_p;
private:
virtual void setToSolution(escript::Data& out, escript::Data& in,
boost::python::object& options) const;
virtual void ypAx(escript::Data& y, escript::Data& x) const;
void solve(double* out, double* in, Options* options) const;
};
void RHS_loadMM_toCSR(const char* filename, double* b, dim_t size);
} // namespace paso
#endif // __PASO_SYSTEMMATRIX_H__
|
bem_pbc.c | #include "common.h"
/*
 * Assembles the BEM coupling matrix: for every observation node p and
 * every triangular face c, boundary_element() computes the three nodal
 * contributions, which are accumulated into row p of bm (an
 * n_node x n_node row-major matrix).
 *
 * NOTE: bm is accumulated with "+="; the caller is expected to have
 * zeroed it beforehand.  Rows are independent, so the outer loop is
 * safe to run in parallel.
 */
void build_matrix_T(double *x_t, int *tri_nodes, double *bm, double *T, int n_node, int n_face) {
	int p;
	#pragma omp parallel for
	for (p = 0; p < n_node; p++) {
		double *obs = &x_t[3 * p];   /* observation point */
		int c;
		for (c = 0; c < n_face; c++) {
			/* the three vertex indices of face c */
			int n0 = tri_nodes[3 * c];
			int n1 = tri_nodes[3 * c + 1];
			int n2 = tri_nodes[3 * c + 2];
			double contrib[3];
			boundary_element(obs, &x_t[3 * n0], &x_t[3 * n1], &x_t[3 * n2], contrib, T);
			bm[p * n_node + n0] += contrib[0];
			bm[p * n_node + n1] += contrib[1];
			bm[p * n_node + n2] += contrib[2];
		}
	}
}
|
ZFC_VGG16_CPU.c | /*
Pretrained VGG16 convolutional neural network in C language
GitHUB Page: https://github.com/ZFTurbo/VGG16-Pretrained-C
Author: ZFTurbo
Compilation: gcc -O3 -fopenmp -lm ZFC_VGG16_CPU.c -o ZFC_VGG16_CPU.exe
Usage: ZFC_VGG16_CPU.exe <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: ZFC_VGG16_CPU.exe "weights.txt" "image_list.txt" "results.txt" 1
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <omp.h>
//################################################################
// Start of PAPI related variables
//################################################################
#include <papi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "log_data.h"
#include <pthread.h>
// Extern declarations followed by the matching definitions in this same
// translation unit; the externs allow other files to share these PAPI
// bookkeeping variables.
extern int retval;
extern int EventSet;
extern int EventCode;
extern int skip_papi_cntr;
extern int skip_papi_cntr_threshold;
extern char EventCodeStr[PAPI_MAX_STR_LEN];
extern long_long values[35];
extern struct timespec begin;
extern struct timespec current;
extern long long start;
extern long long elapsed;
extern long long microseconds;
extern long long previoustime;
extern pthread_mutex_t mutex_papi;
int retval;
int EventSet = PAPI_NULL;
int EventCode;
int skip_papi_cntr = 0;
int skip_papi_cntr_threshold = 1;
char EventCodeStr[PAPI_MAX_STR_LEN];
long_long values[35];
struct timespec begin;
struct timespec current;
long long start;
long long elapsed;
long long microseconds;
long long previoustime = 0;
// NOTE(review): NUM_THREADS appears unused in this excerpt — confirm
// before removing.
static int NUM_THREADS = -1;
// serializes access to the PAPI counters from parallel regions
pthread_mutex_t mutex_papi = PTHREAD_MUTEX_INITIALIZER;
// Returns the OpenMP thread id widened to unsigned long — presumably the
// thread-id callback registered with PAPI (e.g. PAPI_thread_init); confirm.
unsigned long omp_get_thread_num_wrapper(void) {
return (unsigned long) omp_get_thread_num();
}
//################################################################
// End of PAPI related variables
//################################################################
// nanoseconds per second
#define NANOS 1000000000LL
#ifdef _WIN32
#define _CRT_SECURE_NO_WARNINGS 1
// Windows shim: emulates gettimeofday() by storing the current clock()
// tick count; the second parameter exists only to match the POSIX call.
void gettimeofday(time_t *tp, char *_)
{
*tp = clock();
return;
}
// Elapsed wall time in seconds between two clock() samples.
double get_seconds(time_t timeStart, time_t timeEnd) {
return (double)(timeEnd - timeStart) / CLOCKS_PER_SEC;
}
#else
// Elapsed wall time in seconds between two gettimeofday() samples.
double get_seconds(struct timeval timeStart, struct timeval timeEnd) {
return ((timeEnd.tv_sec - timeStart.tv_sec) * 1000000 + timeEnd.tv_usec - timeStart.tv_usec) / 1.e6;
}
#endif
// Input image width/height and convolution kernel size.
#define SIZE 224
#define CONV_SIZE 3
// Number of OpenMP threads used by the parallel loops below.
// NOTE(review): initialized outside this excerpt — confirm.
int numthreads;
// Weights and image block START
float ***image;
// cshape[l] = { output maps, input maps, kernel height, kernel width }
// for convolution layer l (see init_memory/read_weights usage).
int cshape[13][4] = {
{ 64, 3, CONV_SIZE, CONV_SIZE },
{ 64, 64, CONV_SIZE, CONV_SIZE },
{ 128, 64, CONV_SIZE, CONV_SIZE },
{ 128, 128, CONV_SIZE, CONV_SIZE },
{ 256, 128, CONV_SIZE, CONV_SIZE },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 512, 256, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE }
};
float *****wc;
float **bc;
// dshape[l] = { inputs, outputs } for dense layer l.
int dshape[3][2] = {
{ 25088, 4096 },
{ 4096, 4096 },
{ 4096, 1000 }
};
float ***wd;
float **bd;
// Blocks for intermediate convolutions
int mem_block_shape[3] = {512, SIZE, SIZE};
float ***mem_block1;
float ***mem_block2;
// Blocks for dense flatten layers
int mem_block_dense_shape = { 512 * 7 * 7 };
float *mem_block1_dense;
float *mem_block2_dense;
// Weights and image block END
/* Zeroes a full mem_block_shape-sized activation buffer. */
void reset_mem_block(float ***mem) {
	int a, b, c;
	for (a = 0; a < mem_block_shape[0]; a++)
		for (b = 0; b < mem_block_shape[1]; b++)
			for (c = 0; c < mem_block_shape[2]; c++)
				mem[a][b][c] = 0.0;
}
/* Zeroes the flattened dense buffer (mem_block_dense_shape floats). */
void reset_mem_block_dense(float *mem) {
	int idx;
	for (idx = 0; idx < mem_block_dense_shape; idx++)
		mem[idx] = 0.0;
}
/* Allocates nbytes with malloc() and aborts on failure: every buffer
 * allocated here is required for inference, so running out of memory is
 * unrecoverable.  (Previously a failed malloc() was not detected and led
 * to a NULL dereference later.) */
static void *xmalloc(size_t nbytes) {
	void *p = malloc(nbytes);
	if (p == NULL) {
		fprintf(stderr, "init_memory: allocation of %zu bytes failed\n", nbytes);
		exit(1);
	}
	return p;
}

/* Allocates every global buffer used by the network: the input image,
 * convolution and dense weights/biases, and the two ping-pong activation
 * blocks.  Aborts the process if any allocation fails. */
void init_memory() {
	int i, j, k, l;
	// Input image: 3 channels of SIZE x SIZE floats.
	image = (float ***) xmalloc(3 * sizeof(float **));
	for (i = 0; i < 3; i++) {
		image[i] = (float **) xmalloc(SIZE * sizeof(float *));
		for (j = 0; j < SIZE; j++) {
			image[i][j] = (float *) xmalloc(SIZE * sizeof(float));
		}
	}
	// Convolution weights wc[l][out][in][k][k] and biases bc[l][out].
	wc = (float *****) xmalloc(13 * sizeof(float ****));
	bc = (float **) xmalloc(13 * sizeof(float *));
	for (l = 0; l < 13; l++) {
		wc[l] = (float ****) xmalloc(cshape[l][0] * sizeof(float ***));
		for (i = 0; i < cshape[l][0]; i++) {
			wc[l][i] = (float ***) xmalloc(cshape[l][1] * sizeof(float **));
			for (j = 0; j < cshape[l][1]; j++) {
				wc[l][i][j] = (float **) xmalloc(cshape[l][2] * sizeof(float *));
				for (k = 0; k < cshape[l][2]; k++) {
					wc[l][i][j][k] = (float *) xmalloc(cshape[l][3] * sizeof(float));
				}
			}
		}
		bc[l] = (float *) xmalloc(cshape[l][0] * sizeof(float));
	}
	// Dense weights wd[l][in][out] and biases bd[l][out].
	wd = (float ***) xmalloc(3 * sizeof(float **));
	bd = (float **) xmalloc(3 * sizeof(float *));
	for (l = 0; l < 3; l++) {
		wd[l] = (float **) xmalloc(dshape[l][0] * sizeof(float *));
		for (i = 0; i < dshape[l][0]; i++) {
			wd[l][i] = (float *) xmalloc(dshape[l][1] * sizeof(float));
		}
		bd[l] = (float *) xmalloc(dshape[l][1] * sizeof(float));
	}
	// Ping-pong activation blocks, zeroed before first use.
	mem_block1 = (float ***) xmalloc(mem_block_shape[0] * sizeof(float **));
	mem_block2 = (float ***) xmalloc(mem_block_shape[0] * sizeof(float **));
	for (i = 0; i < mem_block_shape[0]; i++) {
		mem_block1[i] = (float **) xmalloc(mem_block_shape[1] * sizeof(float *));
		mem_block2[i] = (float **) xmalloc(mem_block_shape[1] * sizeof(float *));
		for (j = 0; j < mem_block_shape[1]; j++) {
			mem_block1[i][j] = (float *) xmalloc(mem_block_shape[2] * sizeof(float));
			mem_block2[i][j] = (float *) xmalloc(mem_block_shape[2] * sizeof(float));
		}
	}
	reset_mem_block(mem_block1);
	reset_mem_block(mem_block2);
	// Flattened dense buffers, zero-initialized by calloc().
	mem_block1_dense = (float *) calloc(mem_block_dense_shape, sizeof(float));
	mem_block2_dense = (float *) calloc(mem_block_dense_shape, sizeof(float));
	if (mem_block1_dense == NULL || mem_block2_dense == NULL) {
		fprintf(stderr, "init_memory: dense buffer allocation failed\n");
		exit(1);
	}
}
/* Releases every buffer allocated by init_memory(), mirroring its
 * nesting (innermost arrays are freed first). */
void free_memory() {
int i, j, k, l;
// Free image memory
for (i = 0; i < 3; i++) {
for (j = 0; j < SIZE; j++) {
free(image[i][j]);
}
free(image[i]);
}
free(image);
// Free convolution weights
for (l = 0; l < 13; l++) {
for (i = 0; i < cshape[l][0]; i++) {
for (j = 0; j < cshape[l][1]; j++) {
for (k = 0; k < cshape[l][2]; k++) {
free(wc[l][i][j][k]);
}
free(wc[l][i][j]);
}
free(wc[l][i]);
}
free(wc[l]);
free(bc[l]);
}
free(wc);
free(bc);
// Free dense weights
for (l = 0; l < 3; l++) {
for (i = 0; i < dshape[l][0]; i++) {
free(wd[l][i]);
}
free(wd[l]);
free(bd[l]);
}
free(wd);
free(bd);
// Free memblocks
for (i = 0; i < mem_block_shape[0]; i++) {
for (j = 0; j < mem_block_shape[1]; j++) {
free(mem_block1[i][j]);
free(mem_block2[i][j]);
}
free(mem_block1[i]);
free(mem_block2[i]);
}
free(mem_block1);
free(mem_block2);
free(mem_block1_dense);
free(mem_block2_dense);
}
/*
 * Fills the convolution and dense weight/bias blocks with pseudo-random
 * values in [0, 1].  The original fscanf()-based loading from in_file is
 * kept commented out for reference; convolution kernels are stored
 * flipped, matching the correlation performed by convolution_3_x_3().
 *
 * lvls: stop after this many layer blocks (-1 = all 16: 13 convolution
 * blocks followed by 3 dense blocks).
 *
 * Bug fix: "rand() / RAND_MAX" performed INTEGER division, which is 0
 * for every rand() result except RAND_MAX itself — so all weights ended
 * up zero.  The quotient is now computed in floating point.
 */
void read_weights(char *in_file, int lvls) {
	float dval;
	int i, j, k, l, z;
	//FILE *iin;
	int total_lvls_read = 0;
	/*iin = fopen(in_file, "r");
	if (iin == NULL) {
		printf("File %s absent\n", in_file);
		exit(1);
	}
	*/
	// Reading convolution weights (store them flipped from begining)
	for (z = 0; z < 13; z++) {
		if (total_lvls_read >= lvls && lvls != -1)
			break;
		printf("Read conv block %d weights\n", z);
		for (i = 0; i < cshape[z][0]; i++) {
			for (j = 0; j < cshape[z][1]; j++) {
				for (k = 0; k < cshape[z][2]; k++) {
					for (l = 0; l < cshape[z][3]; l++) {
						//fscanf(iin, "%f", &dval);
						dval = (float)rand() / (float)RAND_MAX;
						wc[z][i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = dval;
					}
				}
			}
		}
		for (i = 0; i < cshape[z][0]; i++) {
			//fscanf(iin, "%f", &dval);
			dval = (float)rand() / (float)RAND_MAX;
			bc[z][i] = dval;
		}
		total_lvls_read += 1;
	}
	// Reading dense weights
	for (z = 0; z < 3; z++) {
		if (total_lvls_read >= lvls && lvls != -1)
			break;
		printf("Read dense block %d weights\n", z);
		for (i = 0; i < dshape[z][0]; i++) {
			for (j = 0; j < dshape[z][1]; j++) {
				dval = (float)rand() / (float)RAND_MAX;
				wd[z][i][j] = dval;
			}
		}
		for (i = 0; i < dshape[z][1]; i++) {
			dval = (float)rand() / (float)RAND_MAX;
			bd[z][i] = dval;
		}
		total_lvls_read += 1;
	}
	//fclose(iin);
}
/*
 * Fills the global input image with pseudo-random values in [0, 1]
 * (the original fscanf()-based loading from in_file is kept commented
 * out for reference).
 *
 * Bug fix: "rand() / RAND_MAX" performed INTEGER division and produced
 * 0 for almost every call; the quotient is now computed in floating
 * point so the image actually contains non-zero data.
 */
void read_image(char *in_file) {
	int i, j, l;
	//FILE *iin;
	float dval;
	/*iin = fopen(in_file, "r");
	if (iin == NULL) {
		printf("File %s absent\n", in_file);
		exit(1);
	}
	*/
	/* Reading image */
	for (i = 0; i < SIZE; i++) {
		for (j = 0; j < SIZE; j++) {
			for (l = 0; l < 3; l++) {
				dval = (float)rand() / (float)RAND_MAX;
				//fscanf(iin, "%f", &dval);
				image[l][i][j] = dval;
			}
		}
	}
	// fclose(iin);
}
/* Subtracts a fixed per-channel offset from every image pixel.
 * The constants are the standard VGG mean-pixel values (presumably in
 * BGR channel order — confirm against the image loader). */
void normalize_image() {
	int ch, r, c;
	float coef[3] = { 103.939, 116.779, 123.68 };
	for (ch = 0; ch < 3; ch++)
		for (r = 0; r < SIZE; r++)
			for (c = 0; c < SIZE; c++)
				image[ch][r][c] -= coef[ch];
}
/*
 * 3x3 "same" convolution: convolves the size x size input `matrix` with
 * the 3x3 `kernel` using zero padding, and ACCUMULATES the result into
 * `out` (out[i][j] += sum).
 *
 * Fix: the zero-padded copy was a fixed stack array of
 * (SIZE+2) x (SIZE+2) floats (~204 KB of stack per call, per thread)
 * that would be overrun for size > SIZE.  It is now heap-allocated and
 * sized to the actual input.
 */
void convolution_3_x_3(float **matrix, float **kernel, float **out, int size) {
	int i, j;
	float sum;
	const int pad = size + 2;
	float *zeropad = (float *) calloc((size_t) pad * pad, sizeof(float));
	if (zeropad == NULL)
		return; /* allocation failure: leave out unchanged */
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++)
			zeropad[(i + 1) * pad + (j + 1)] = matrix[i][j];
	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++) {
			sum = zeropad[i * pad + j] * kernel[0][0] +
				zeropad[(i + 1) * pad + j] * kernel[1][0] +
				zeropad[(i + 2) * pad + j] * kernel[2][0] +
				zeropad[i * pad + j + 1] * kernel[0][1] +
				zeropad[(i + 1) * pad + j + 1] * kernel[1][1] +
				zeropad[(i + 2) * pad + j + 1] * kernel[2][1] +
				zeropad[i * pad + j + 2] * kernel[0][2] +
				zeropad[(i + 1) * pad + j + 2] * kernel[1][2] +
				zeropad[(i + 2) * pad + j + 2] * kernel[2][2];
			out[i][j] += sum;
		}
	}
	free(zeropad);
}
/* Adds bias bs to every element of the size x size map, then applies
 * ReLU (negative values are clamped to zero). */
void add_bias_and_relu(float **out, float bs, int size) {
	int r, c;
	for (r = 0; r < size; r++) {
		for (c = 0; c < size; c++) {
			float v = out[r][c] + bs;
			out[r][c] = (v < 0) ? 0.0 : v;
		}
	}
}
/* Adds the per-element bias vector bs to out; when relu == 1 negative
 * results are clamped to zero. */
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
	int idx;
	for (idx = 0; idx < size; idx++) {
		out[idx] += bs[idx];
		if (relu == 1 && out[idx] < 0)
			out[idx] = 0.0;
	}
}
/* Returns the maximum of four floats. */
float max_of_4(float a, float b, float c, float d) {
	float best = a;
	if (b > best)
		best = b;
	if (c > best)
		best = c;
	if (d > best)
		best = d;
	return best;
}
/* In-place 2x2 max pooling with stride 2: the pooled result is written
 * into the top-left (size/2 x size/2) quadrant of out. */
void maxpooling(float **out, int size) {
	int r, c;
	for (r = 0; r < size; r += 2) {
		for (c = 0; c < size; c += 2) {
			/* maximum of the 2x2 window (inlined max-of-four) */
			float best = out[r][c];
			if (out[r + 1][c] > best)
				best = out[r + 1][c];
			if (out[r][c + 1] > best)
				best = out[r][c + 1];
			if (out[r + 1][c + 1] > best)
				best = out[r + 1][c + 1];
			out[r / 2][c / 2] = best;
		}
	}
}
/* Copies a sh0 x sh1 x sh2 volume into the flat array out in row-major
 * (C) order. */
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
	int a, b, c;
	int pos = 0;
	for (a = 0; a < sh0; a++)
		for (b = 0; b < sh1; b++)
			for (c = 0; c < sh2; c++)
				out[pos++] = in[a][b][c];
}
/* Fully connected layer: out[o] = dot(in, column o of weights), where
 * weights is indexed [input][output].  Output elements are independent,
 * so the outer loop parallelizes across numthreads threads. */
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
	int o, k;
	#pragma omp parallel for private(k) schedule(dynamic,1) num_threads(numthreads)
	for (o = 0; o < sh_out; o++) {
		float acc = 0.0;
		for (k = 0; k < sh_in; k++)
			acc += in[k] * weights[k][o];
		out[o] = acc;
	}
}
void softmax(float *out, int sh_out) {
int i;
float max_val, sum;
max_val = out[0];
for (i = 1; i < sh_out; i++) {
if (out[i] > max_val)
max_val = out[i];
}
sum = 0.0;
for (i = 0; i < sh_out; i++) {
out[i] = exp(out[i] - max_val);
sum += out[i];
}
for (i = 0; i < sh_out; i++) {
out[i] /= sum;
}
}
/* Prints every element of a sh0 x sh1 x sh2 volume to stdout, one value
 * per line, in row-major order. */
void dump_memory_structure_conv(float ***mem, int sh0, int sh1, int sh2) {
	int a, b, c;
	for (a = 0; a < sh0; a++)
		for (b = 0; b < sh1; b++)
			for (c = 0; c < sh2; c++)
				printf("%.12lf\n", mem[a][b][c]);
}
/* Writes every element of a sh0 x sh1 x sh2 volume to "debug_c.txt",
 * one value per line, in row-major order. */
void dump_memory_structure_conv_to_file(float ***mem, int sh0, int sh1, int sh2) {
	int a, b, c;
	FILE *dbg = fopen("debug_c.txt", "w");
	for (a = 0; a < sh0; a++)
		for (b = 0; b < sh1; b++)
			for (c = 0; c < sh2; c++)
				fprintf(dbg, "%.12lf\n", mem[a][b][c]);
	fclose(dbg);
}
/* Prints the first sh0 elements of a flat buffer, one value per line. */
void dump_memory_structure_dense(float *mem, int sh0) {
	int idx;
	for (idx = 0; idx < sh0; idx++)
		printf("%.12lf\n", mem[idx]);
}
/* Writes the first sh0 elements of a flat buffer to "debug_c.txt",
 * one value per line. */
void dump_memory_structure_dense_to_file(float *mem, int sh0) {
	int idx;
	FILE *dbg = fopen("debug_c.txt", "w");
	for (idx = 0; idx < sh0; idx++)
		fprintf(dbg, "%.12lf\n", mem[idx]);
	fclose(dbg);
}
/* Prints the whole 3 x SIZE x SIZE input image to stdout, one value per
 * line, channel by channel. */
void dump_image() {
	int ch, r, c;
	for (ch = 0; ch < 3; ch++)
		for (r = 0; r < SIZE; r++)
			for (c = 0; c < SIZE; c++)
				printf("%.12lf\n", image[ch][r][c]);
}
void get_VGG16_predict(int only_convolution) {
int i, j;
int level, cur_size;
// Init intermediate memory
reset_mem_block(mem_block1);
reset_mem_block(mem_block2);
reset_mem_block_dense(mem_block1_dense);
reset_mem_block_dense(mem_block2_dense);
waca_papi_read("Hello");
// Layer 1 (Convolution 3 -> 64)
level = 0;
cur_size = SIZE;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(image[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
waca_papi_read("Hello");
// Layer 2 (Convolution 64 -> 64)
level = 1;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 3 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
waca_papi_read("Hello");
// Layer 4 (Convolution 64 -> 128)
level = 2;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 5 (Convolution 128 -> 128)
level = 3;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 6 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
waca_papi_read("Hello");
// Layer 7 (Convolution 128 -> 256)
level = 4;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 8 (Convolution 256 -> 256)
level = 5;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 9 (Convolution 256 -> 256)
level = 6;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 10 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block1[i], cur_size);
}
cur_size /= 2;
waca_papi_read("Hello");
// Layer 11 (Convolution 256 -> 512)
level = 7;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 12 (Convolution 512 -> 512)
level = 8;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 13 (Convolution 512 -> 512)
level = 9;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 14 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
waca_papi_read("Hello");
// Layer 15 (Convolution 512 -> 512)
level = 10;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 16 (Convolution 512 -> 512)
level = 11;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size);
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
waca_papi_read("Hello");
// Layer 17 (Convolution 512 -> 512)
level = 12;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size);
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
waca_papi_read("Hello");
// Layer 18 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(numthreads)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block1[i], cur_size);
}
cur_size /= 2;
waca_papi_read("Hello");
// Layer 19 (Flatten)
flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
if (only_convolution == 1) {
return;
}
waca_papi_read("Hello");
// Layer 20 (Dense)
level = 0;
dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
reset_mem_block_dense(mem_block1_dense);
waca_papi_read("Hello");
// Layer 21 (Dense)
level = 1;
dense(mem_block2_dense, wd[level], mem_block1_dense, dshape[level][0], dshape[level][1]);
add_bias_and_relu_flatten(mem_block1_dense, bd[level], dshape[level][1], 1);
reset_mem_block_dense(mem_block2_dense);
waca_papi_read("Hello");
// Layer 22 (Dense)
level = 2;
dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
softmax(mem_block2_dense, dshape[level][1]);
// dump_memory_structure_dense_to_file(mem_block2_dense, dshape[level][1]);
waca_papi_read("Hello");
return;
}
// Dump the network result to 'out' as space-separated "%g" values plus a
// trailing newline.  With only_convolution == 1 the flattened convolution
// output (512*7*7 values in mem_block1_dense) is written; otherwise the
// final dense-layer activations (dshape[2][1] values in mem_block2_dense).
void output_predictions(FILE *out, int only_convolution) {
    int count = (only_convolution == 1) ? 512 * 7 * 7 : dshape[2][1];
    for (int idx = 0; idx < count; idx++) {
        if (only_convolution == 1)
            fprintf(out, "%g ", mem_block1_dense[idx]);
        else
            fprintf(out, "%g ", mem_block2_dense[idx]);
    }
    fprintf(out, "\n");
}
// Strip leading and trailing whitespace from 'str' in place.
// Returns a pointer to the first non-whitespace character; trailing
// whitespace is removed by writing a new null terminator.
char *trimwhitespace(char *str)
{
    // Advance past any leading whitespace.
    char *head = str;
    while (isspace((unsigned char)*head))
        head++;

    // String was empty or all whitespace: return the terminator position.
    if (*head == '\0')
        return head;

    // Walk backwards from the end over trailing whitespace.
    char *tail = head + strlen(head) - 1;
    while (tail > head && isspace((unsigned char)*tail))
        tail--;

    // Terminate just after the last non-whitespace character.
    tail[1] = '\0';
    return head;
}
// Driver: parse arguments, read the first image from the list file, run the
// VGG16 forward pass, and write predictions to the output file.
// Usage: <program> <number of threads> <images list file> <output file> [only_convolution]
int main(int argc, char *argv[]) {
    FILE *file_list, *results;
    char buf[1024];
#ifndef _WIN32
    struct timeval timeStart, timeEnd;
#else
    time_t timeStart, timeEnd;
#endif
    double deltaTime;
    //char *weights_file;
    char *image_list_file;
    char *output_file;
    int lvls = -1;
    int only_convolution = 0;
#ifdef _OPENMP
    // Default thread count: leave one core free; overridden by argv[1] below.
    numthreads = omp_get_num_procs() - 1;
#endif
    /*if (numthreads < 1)
        numthreads = 1;
    numthreads = 1;
    */
    if (argc != 4 && argc != 5) {
        printf("Usage: <program.exe> <number of threads> <images list file> <output file> <only_convolution [optional]>\n");
        return 0;
    }
    //weights_file = argv[1];
    numthreads = atoi(argv[1]);
    printf("Using %d threads\n", numthreads);
    image_list_file = argv[2];
    output_file = argv[3];
    if (argc == 5) {
        // Any 5th argument stops the network after the convolution layers.
        lvls = 13;
        only_convolution = 1;
    }
    // ------ PAPI -------
    papi_init();
    // Start the timer
    if (clock_gettime(CLOCK_MONOTONIC , &begin)) {
        exit(EXIT_FAILURE);
    }
    // Start time in nanoseconds
    start = begin.tv_sec*NANOS + begin.tv_nsec;
    // ------ PAPI -------
    init_memory();
    file_list = fopen(image_list_file, "r");
    if (file_list == NULL) {
        printf("Check file list location: %s", image_list_file);
        return 1;
    }
    results = fopen(output_file, "w");
    if (results == NULL) {
        printf("Couldn't open file for writing: %s", output_file);
        fclose(file_list);
        return 1;
    }
    gettimeofday(&timeStart, NULL);
    //read_weights(weights_file, lvls);
    gettimeofday(&timeEnd, NULL);
    deltaTime = get_seconds(timeStart, timeEnd);
    printf("Reading weights: %.3lf sec\n", deltaTime);
    // while (!feof(file_list)) {
    gettimeofday(&timeStart, NULL);
    // BUG FIX: the fgets result was ignored, so an empty list file left 'buf'
    // uninitialized and it was used anyway.
    if (fgets(buf, 1024, file_list) == NULL) {
        printf("Image list is empty: %s\n", image_list_file);
        fclose(file_list);
        fclose(results);
        free_memory();
        return 1;
    }
    // if (strlen(buf) == 0) {
    //   break;
    // }
    printf("%zu\n", strlen(buf));   // BUG FIX: %zu for size_t (was %d, undefined behavior)
    read_image(trimwhitespace(buf));
    normalize_image();
    // dump_image();
    waca_papi_read("Hello");
    get_VGG16_predict(only_convolution);
    output_predictions(results, only_convolution);
    waca_papi_read("Hello");
    gettimeofday(&timeEnd, NULL);
    deltaTime = get_seconds(timeStart, timeEnd);
    printf("Infer image %s: %.3lf sec\n", buf, deltaTime);
    // }
    free_memory();
    fclose(file_list);
    fclose(results);   // BUG FIX: output file was never closed
    return 0;
}
|
GB_unop__abs_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint8_uint8)
// op(A') function: GB (_unop_tran__abs_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = aij
// Type of the entries of the input matrix A.
#define GB_ATYPE \
    uint8_t

// Type of the entries of the output matrix C.
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]: load one entry of A.
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// Access one entry of C.
#define GB_CX(p) Cx [p]

// unary operator: ABS of an unsigned value is the identity, so z = x.
#define GB_OP(z, x) \
    z = x ;

// casting: uint8_t to uint8_t is a plain copy.
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij): load, cast, apply, and store a single entry.
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ;               \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_uint8_uint8)
(
    uint8_t *Cx,                 // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,                 // number of entries (or bitmap slots) to process
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not in bitmap form: all anz entries are present.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // abs of an unsigned 8-bit value is the identity: a straight copy.
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Skip slots whose bitmap entry marks them as not present.
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // see GB_unop_transpose.c for usage
    const int64_t *restrict A_slice, // see GB_unop_transpose.c for usage
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template specializes itself for this operator via
    // the GB_* macros defined above (GB_CAST_OP etc.).
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
task-taskwait-nested.c | /*
* task-taskwait-nested.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include "ompt/ompt-signal.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
int main(int argc, char *argv[]) {
  int var = 0, a = 0; // var: deliberately raced-on counter; a: OMPT_SIGNAL/OMPT_WAIT handshake value
#pragma omp parallel num_threads(8) shared(var, a)
#pragma omp master
  {
#pragma omp task shared(var, a)
    {
#pragma omp task shared(var, a)
      {
        // wait for master to pass the taskwait
        OMPT_SIGNAL(a);
        OMPT_WAIT(a, 2);
        var++; // unsynchronized write: intentionally races with the master's increment below
      }
    }
    // Give other thread time to steal the task and execute its child.
    OMPT_WAIT(a, 1);
    // Only directly generated children are guaranteed to be executed.
#pragma omp taskwait
    OMPT_SIGNAL(a);
    var++; // the taskwait above waits only for the direct child, not the nested task
  }
  int error = (var != 2); // nonzero exit if one of the racing increments was lost
  fprintf(stderr, "DONE\n");
  return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:34
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:44
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
conv_kernel_fp16_arm82.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, Open AI Lab
* Author: xlchen@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include <sys/time.h>
#include "../conv_hcl_kernel.h"
#include "compiler_fp16.h"
#define PER_OUT_CHAN 16
void hgemm_4x16_a76(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output,
long output_xy, long fused_relu);
void hgemm_4x4_a76(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output,
long output_xy, long fused_relu);
void im2col_fp16_1x1(__fp16* input, long input_xy, __fp16* col, long col_cnt, long input_chan);
void im2col_fp16_3x3(__fp16* input, long input_x, long input_y, long input_chan, __fp16* col, long stride);
// Expand image 'im' (per-channel planes of input_x*input_y __fp16 values)
// into the column matrix 'col' for GEMM-based convolution.  Columns
// [col_start, col_end) are produced, packed in groups of 4 output positions:
// each group stores kernel_size rows of 4 values, zero-filled where a tap
// falls outside the image (padding) or where the group extends past col_end.
// 1x1/stride-1 and 3x3/dilation-1 kernels use assembly helpers
// (im2col_fp16_1x1 / im2col_fp16_3x3); everything else takes the generic
// scalar path.  NOTE(review): pad_w1/pad_h1 only influence the 3x3 fast-path
// eligibility test; coordinates are shifted by the leading pads only.
void im2col(__fp16* im, __fp16* col, int input_chan, int input_x, int input_y, int kernel_x, int kernel_y, int stride_x,
            int stride_y, int dilation_x, int dilation_y, int pad_w0, int pad_w1, int pad_h0, int pad_h1, int output_x,
            int output_y, int col_start, int col_end)
{
    int kernel_size = kernel_x * kernel_y * input_chan;   // rows of the column matrix
    int input_xy = input_x * input_y;                     // size of one channel plane
    int pad_x = pad_w0;
    int pad_y = pad_h0;
    __fp16* cur_col = col + col_start * kernel_size;
    int col_i, col_j, kch, ky, kx, i;
    if((kernel_x == 1) && (kernel_y == 1) && (stride_x == 1) && (stride_y == 1))
    {
        {
            // Bulk of the columns: whole groups of 4 handled in assembly.
            int col_cnt = (col_end & -4) - (col_start & -4);
            im2col_fp16_1x1(im + col_start, input_xy, cur_col, col_cnt, input_chan);
            cur_col += col_cnt * kernel_size;
            col_i = col_end & -4;
        }
        // final 4 input: scalar tail, zero-padded past col_end
        if(col_end & 0x3)
        {
            for(col_j = 0; col_j < kernel_size; col_j++)
            {
                for(i = 0; i < 4; i++)
                {
                    if((col_i + i) < col_end)
                        *cur_col++ = *(im + input_xy * col_j + col_i + i);
                    else
                        *cur_col++ = 0.0;
                }
            }
        }
    }
    else if((kernel_x == 3) && (kernel_y == 3) && (dilation_x == 1) && (dilation_y == 1))
    {
        int is_pad0 = (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0);
        for(col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
        {
            cur_col = col + col_i * kernel_size;
            int imy0 = col_i / output_x;          // output row of the first position in the group
            int imy3 = (col_i + 3) / output_x;    // output row of the last position in the group
            int imx0 = col_i - imy0 * output_x;
            int imx3 = (col_i + 3) - imy3 * output_x;
            // Assembly path only when all 4 positions share one output row and
            // none of their receptive fields touches padding.
            if((imy0 == imy3) &&
               (is_pad0 || (imy0 != 0 && imx0 != 0 && imy0 != (output_y - 1) && imx3 != (output_x - 1))))
            {
                __fp16* l0 = im + (imy0 * stride_y - pad_y) * input_x + (imx0 * stride_x - pad_x);
                {
                    im2col_fp16_3x3(l0, input_x, input_y, input_chan, cur_col, stride_x);
                    cur_col += 4 * kernel_size;
                }
            }
            else
            {
                // Scalar fallback: image coordinates for each of the 4
                // positions, gathering with bounds checks (padding -> 0).
                int cnt_y[4] = {imy0, (col_i + 1) / output_x, (col_i + 2) / output_x, imy3};
                int cnt_x[4] = {imx0, col_i - cnt_y[1] * output_x + 1, col_i - cnt_y[2] * output_x + 2, imx3};
                int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x,
                                    cnt_x[2] * stride_x - pad_x, cnt_x[3] * stride_x - pad_x};
                int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y,
                                    cnt_y[2] * stride_y - pad_y, cnt_y[3] * stride_y - pad_y};
                for(kch = 0; kch < input_chan; kch++)
                    for(ky = 0; ky < 3; ky++)
                        for(kx = 0; kx < 3; kx++)
                        {
                            int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                            int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                            for(i = 0; i < 4; i++)
                            {
                                if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                    *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                                else
                                    *cur_col++ = 0.0;
                            }
                        }
            }
        }
        // final 4 input: last partial group, zero-padded past col_end
        if(col_end & 0x3)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x, cnt_x[2] * stride_x - pad_x,
                                cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y, cnt_y[2] * stride_y - pad_y,
                                cnt_y[3] * stride_y - pad_y};
            for(kch = 0; kch < input_chan; kch++)
                for(ky = 0; ky < 3; ky++)
                    for(kx = 0; kx < 3; kx++)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if((col_i + i) < col_end && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
                               imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
    }
    else
    { // for general cases: arbitrary kernel size, stride, and dilation
        for(col_i = (col_start & -4); col_i < (col_end & -4); col_i += 4)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x, cnt_x[2] * stride_x - pad_x,
                                cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y, cnt_y[2] * stride_y - pad_y,
                                cnt_y[3] * stride_y - pad_y};
            // Dilated taps: ky/kx step by the dilation factor.
            for(kch = 0; kch < input_chan; kch++)
                for(ky = 0; ky < (kernel_y * dilation_y); ky += dilation_y)
                    for(kx = 0; kx < (kernel_x * dilation_x); kx += dilation_x)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if(imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 && imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
        // final 4 input: last partial group, zero-padded past col_end
        if(col_end & 0x3)
        {
            int cnt_y[4] = {col_i / output_x, (col_i + 1) / output_x, (col_i + 2) / output_x, (col_i + 3) / output_x};
            int cnt_x[4] = {col_i - cnt_y[0] * output_x, col_i - cnt_y[1] * output_x + 1,
                            col_i - cnt_y[2] * output_x + 2, col_i - cnt_y[3] * output_x + 3};
            int imx_start[4] = {cnt_x[0] * stride_x - pad_x, cnt_x[1] * stride_x - pad_x, cnt_x[2] * stride_x - pad_x,
                                cnt_x[3] * stride_x - pad_x};
            int imy_start[4] = {cnt_y[0] * stride_y - pad_y, cnt_y[1] * stride_y - pad_y, cnt_y[2] * stride_y - pad_y,
                                cnt_y[3] * stride_y - pad_y};
            for(kch = 0; kch < input_chan; kch++)
                for(ky = 0; ky < (kernel_y * dilation_y); ky += dilation_y)
                    for(kx = 0; kx < (kernel_x * dilation_x); kx += dilation_x)
                    {
                        int imx[4] = {imx_start[0] + kx, imx_start[1] + kx, imx_start[2] + kx, imx_start[3] + kx};
                        int imy[4] = {imy_start[0] + ky, imy_start[1] + ky, imy_start[2] + ky, imy_start[3] + ky};
                        for(i = 0; i < 4; i++)
                        {
                            if((col_i + i) < col_end && imx[i] >= 0 && imx[i] < input_x && imy[i] >= 0 &&
                               imy[i] < input_y)
                                *cur_col++ = *(im + input_xy * kch + input_x * imy[i] + imx[i]);
                            else
                                *cur_col++ = 0.0;
                        }
                    }
        }
    }
}
// interleave 0 ~ (output_chan & -16) kernels with 16 in form of k[0-15][0],k[0-15][1],k[0-15][2]..
// interleave (output_chan & -16) ~ ((output_chan + 3) & -4) tail kernls with 4 in form of
// k[0-3][0],k[0-3][1],k[0-3][2]..
// Re-pack convolution kernels for the hgemm micro-kernels:
//  - full groups of 16 output channels are stored as
//    k[0..15][0], k[0..15][1], ... (consumed by hgemm_4x16_a76),
//  - remaining channels are packed in groups of 4 (hgemm_4x4_a76),
//    with the final partial group zero-padded up to 4 channels.
void interleave_kernel(__fp16* kernel, __fp16* kernel_interleaved, int kernel_chan, int kernel_size)
{
    __fp16* out = kernel_interleaved;
    int chan, pos, n;

    // Pack complete groups of 16 kernels.
    for(chan = 0; chan + 15 < kernel_chan; chan += 16)
    {
        for(pos = 0; pos < kernel_size; pos++)
            for(n = 0; n < 16; n++)
                *out++ = kernel[kernel_size * (chan + n) + pos];
    }

    // Pack complete groups of 4 kernels from the 16-aligned tail.
    for(; chan + 3 < kernel_chan; chan += 4)
    {
        for(pos = 0; pos < kernel_size; pos++)
            for(n = 0; n < 4; n++)
                *out++ = kernel[kernel_size * (chan + n) + pos];
    }

    // Pack the last partial group (1..3 kernels), zero-padded to 4.
    int remainder = kernel_chan & 0x3;
    if(remainder)
    {
        for(pos = 0; pos < kernel_size; pos++)
        {
            for(n = 0; n < 4; n++)
            {
                if(n < remainder)
                    *out++ = kernel[kernel_size * (chan + n) + pos];
                else
                    *out++ = 0.0;
            }
        }
    }
}
static void interleave(struct ir_tensor * filter, struct conv_priv_info* priv_info, struct conv_param* param)
{
int group = param->group;
int out_chan = filter->dims[0] / group;
int kernel_size = filter->dims[1] * filter->dims[2] * filter->dims[3];
int kernel_size_g = kernel_size * out_chan;
int kernel_interleaved_size_g = kernel_size * ((out_chan + 3) & -4);
__fp16* kernel = (__fp16*)filter->data;
__fp16* interleave_buf = (__fp16*)priv_info->interleave_buffer;
for(int g = 0; g < group; g++)
{
__fp16* cur_kernel = kernel + g * kernel_size_g;
__fp16* cur_interleave = interleave_buf + g * kernel_interleaved_size_g;
interleave_kernel(cur_kernel, cur_interleave, out_chan, kernel_size);
}
}
// GEMM driver for output channels [0, ch_end), where ch_end is a multiple of
// PER_OUT_CHAN: each OpenMP iteration computes a block of 16 output channels
// with the 4x16 assembly micro-kernel hgemm_4x16_a76.  When output_xy is not
// a multiple of 4, the last partial group of columns is computed into a
// 16x4 scratch tile ('result') and only the valid col_end3 columns are
// copied out.  NOTE(review): ch_start and cpu_affinity are unused here; the
// caller in this file always passes ch_start == 0.
static void hgemm_set(__fp16* col, __fp16* kernel, __fp16* biases, __fp16* output, int kernel_size,
                      int ch_start, int ch_end, int output_xy, int relu_fused, int num_thread, int cpu_affinity)
{
    int nn_outch = ch_end / PER_OUT_CHAN;   // number of 16-channel blocks
    int col_end3 = output_xy & 0x3;         // leftover columns (0..3)
    if (col_end3)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * PER_OUT_CHAN;      // first output channel of this block
            __fp16* biasptr = biases ? (__fp16* )(biases + p) : NULL;
            __fp16* kernel_tmp = (__fp16* )(kernel + p * kernel_size);
            __fp16* output_tmp = (__fp16* )(output + p * output_xy);
            int col_line = 0;
            // Full groups of 4 columns written straight into 'output'.
            for(col_line = 0; col_line + 3 < output_xy; col_line += 4)
            {
                __fp16* col_tmp = ( __fp16* )(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, relu_fused);
            }
            {
                // Tail: compute into a local 16x4 tile, then copy only the
                // valid col_end3 columns per channel.
                __fp16 result[64];
                __fp16* col_tmp = ( __fp16* )(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, result, 4, relu_fused);
                for(int i = 0; i < 16; i++)
                {
                    for(int j = 0; j < (col_end3); j++)
                        *(output + (p + i) * output_xy + col_line + j) = result[(i << 2) + j];
                }
            }
        }
    }
    else
    {
        // output_xy is a multiple of 4: no tail handling needed.
#pragma omp parallel for num_threads(num_thread)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * PER_OUT_CHAN;
            __fp16* biasptr = biases ? (__fp16* )(biases + p) : NULL;
            __fp16* kernel_tmp = (__fp16* )(kernel + p * kernel_size);
            __fp16* output_tmp = (__fp16* )(output + p * output_xy);
            for(int col_line = 0; col_line + 3 < output_xy; col_line += 4)
            {
                __fp16* col_tmp = (__fp16* )(col + col_line * kernel_size);
                hgemm_4x16_a76(biasptr, col_tmp, kernel_tmp, kernel_size, output_tmp + col_line, output_xy, relu_fused);
            }
        }
    }
}
// 4x4 GEMM path for the tail output channels [ch_start, ch_end).  Partial
// blocks in both the channel and column dimensions are computed into a local
// 4x4 'result' tile and only the valid elements are copied out.  Runs on the
// calling thread (no OpenMP here); num_thread and cpu_affinity are unused.
static void hgemm4x4(__fp16* col, __fp16* kernel, __fp16* biases, __fp16* output, int kernel_size,
                     int ch_start, int ch_end, int output_xy, int relu_fused, int num_thread, int cpu_affinity)
{
    __fp16 result[16];            // scratch 4x4 output tile
    __fp16* cur_biases = NULL;
    int col_line, kernel_num;
    __fp16 *cur_col, *cur_kernel, *cur_output;
    int i, j;
    int col_end3 = output_xy & 0x3;   // leftover columns (0..3)
    int kernel_end3 = ch_end & 0x3;   // leftover output channels (0..3)
    // Full 4-channel blocks.
    for(kernel_num = ch_start; kernel_num < (ch_end & -4); kernel_num += 4)
    {
        if(biases)
            cur_biases = biases + kernel_num;
        cur_kernel = kernel + kernel_num * kernel_size;
        cur_output = output + kernel_num * output_xy;
        // Full groups of 4 columns written straight into 'output'.
        for(col_line = 0; col_line < (output_xy & -4); col_line += 4)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, cur_output + col_line, output_xy, relu_fused);
        }
        // Column tail: compute into the tile, copy valid columns only.
        if(col_end3)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < 4; i++)
            {
                for(j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
    // Channel tail (1..3 remaining output channels): always go through the
    // scratch tile and copy only kernel_end3 rows.
    if(kernel_end3)
    {
        if(biases)
            cur_biases = biases + kernel_num;
        cur_kernel = kernel + kernel_num * kernel_size;
        for(col_line = 0; col_line < (output_xy & -4); col_line += 4)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < kernel_end3; i++)
                for(j = 0; j < 4; j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
        }
        // Corner: both channel and column tails.
        if(col_end3)
        {
            cur_col = col + col_line * kernel_size;
            hgemm_4x4_a76(cur_biases, cur_col, cur_kernel, kernel_size, result, 4, relu_fused);
            for(i = 0; i < (kernel_end3); i++)
            {
                for(j = 0; j < (col_end3); j++)
                    *(output + (kernel_num + i) * output_xy + col_line + j) = result[(i << 2) + j];
            }
        }
    }
}
// Size in bytes of the shared im2col buffer for one group: a column matrix
// of kernel_size rows, with the column count (output spatial size) rounded
// up to a multiple of 4, plus 128 bytes of slack.  'input' is unused but
// kept for interface compatibility.
int fp16_conv_hcl_get_shared_mem_size(struct ir_tensor* input,
                                      struct ir_tensor* output,
                                      struct conv_param* param)
{
    int chan_per_group = param->input_channel / param->group;
    int ksize = chan_per_group * param->kernel_h * param->kernel_w;
    int out_xy = output->dims[2] * output->dims[3];
    int padded_cols = (out_xy + 3) & -4;
    int bytes = sizeof(__fp16) * ksize * padded_cols + 128;
    return bytes;
}
// Size in bytes of the private interleaved-kernel buffer: per group, the
// output-channel count is rounded up to a multiple of 4 (matching the
// interleaved layout), plus 128 bytes of slack.
static int get_private_mem_size(struct ir_tensor * filter, struct conv_param* param)
{
    int chan_per_group = filter->dims[0] / param->group;
    int ksize = filter->dims[1] * filter->dims[2] * filter->dims[3];
    int padded_chan = (chan_per_group + 3) & -4;
    int bytes = sizeof(__fp16) * ksize * padded_chan * param->group + 128;
    return bytes;
}
// Allocate the im2col and interleaved-kernel buffers (unless the caller
// supplied external memory) and pre-interleave the filter weights.
// Returns 0 on success, -1 on allocation failure.
int fp16_conv_hcl_prerun(struct ir_tensor* input_tensor,
                         struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor,
                         struct conv_priv_info* priv_info,
                         struct conv_param* param)
{
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = fp16_conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        // BUG FIX: the allocation result was not checked; a failure led to a
        // NULL dereference later in fp16_conv_hcl_run.
        if (mem == NULL)
            return -1;
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    interleave(filter_tensor, priv_info, param);
    return 0;
}
// Release the buffers this module allocated in prerun.  Memory supplied
// externally by the caller is left untouched.  Always returns 0.
int fp16_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if(priv_info->im2col_buffer != NULL && !priv_info->external_im2col_mem)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    if(priv_info->interleave_buffer != NULL && !priv_info->external_interleave_mem)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    return 0;
}
// Forward fp16 convolution: for each batch image and each group, expand the
// input with im2col into the shared column buffer, then multiply by the
// pre-interleaved kernels.  Output channels in full blocks of PER_OUT_CHAN
// go through hgemm_set (4x16 micro-kernel); the remainder goes through the
// 4x4 fallback hgemm4x4.  Returns 0 on success.
int fp16_conv_hcl_run(struct ir_tensor* input_tensor,
                      struct ir_tensor* filter_tensor,
                      struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor,
                      struct conv_priv_info* priv_info,
                      struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* convolution parameters */
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    long fused_relu = param->activation;

    /* per-group tensor geometry */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;
    // Per-group channel count of the interleaved kernels, padded to 4.
    // FIX: this was computed but unused while the same expression was
    // recomputed inline below; use it for the interleave stride.
    int out_c_align = ((out_c + 3) & -4);

    /* buffer addr */
    __fp16* input_buf = (__fp16*)input_tensor->data;
    __fp16* output_buf = (__fp16*)output_tensor->data;
    __fp16* col_buf = (__fp16*)priv_info->im2col_buffer;
    __fp16* interleave_buf = (__fp16*)priv_info->interleave_buffer;
    __fp16* biases_buf = NULL;
    if (bias_tensor)
        biases_buf = (__fp16*)bias_tensor->data;

    /* split output channels into full 16-channel blocks and a remainder */
    int sgemm_set_chan = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
    int sgemm_set_remain = out_c % PER_OUT_CHAN;

    for(int n = 0; n < batch; n++) // batch size
    {
        for(int g = 0; g < group; g++)
        {
            /* im2col */
            __fp16* cur_input = input_buf + (n * group + g) * input_size;
            im2col(cur_input, col_buf, in_c, in_w, in_h, kernel_w, kernel_h,
                   stride_w, stride_h, dilation_w, dilation_h, pad_w0, pad_w1, pad_h0, pad_h1,
                   out_w, out_h, 0, out_hw);
            /* gemm */
            __fp16* cur_kernel = interleave_buf + g * (kernel_size * out_c_align);
            __fp16* cur_output = output_buf + (n * group + g) * output_size;
            __fp16* cur_bias = biases_buf ? (biases_buf + g * out_c) : NULL;
            hgemm_set(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, 0, sgemm_set_chan, out_hw, fused_relu, num_thread, cpu_affinity);
            if(sgemm_set_remain)
            {
                hgemm4x4(col_buf, cur_kernel, cur_bias, cur_output, kernel_size, sgemm_set_chan, out_c, out_hw, fused_relu, num_thread, cpu_affinity);
            }
        }
    }
    return 0;
}
|
tabu_search.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_SOLVER_TABU_SEARCH_TABU_SEARCH_H__
#define PRINTEMPS_SOLVER_TABU_SEARCH_TABU_SEARCH_H__
#include "../memory.h"
#include "tabu_search_move_score.h"
#include "tabu_search_print.h"
#include "tabu_search_termination_status.h"
#include "tabu_search_result.h"
namespace printemps {
namespace solver {
namespace tabu_search {
/*****************************************************************************/
/**
 * Run a single tabu search loop for the given model.
 *
 * @param a_model_ptr Pointer to the target model; updated in place as moves
 * are applied.
 * @param a_OPTION Solver options (copied locally).
 * @param a_INITIAL_VARIABLE_VALUE_PROXIES Initial values to import into the
 * model before the loop starts.
 * @param a_INCUMBENT_HOLDER Incumbent solutions found so far (copied locally
 * and returned in the result).
 * @param a_MEMORY Long-term memory (tabu/frequency information, copied
 * locally and returned in the result).
 * @return TabuSearchResult holding the updated incumbent holder, memory,
 * termination status and loop statistics.
 * @throws std::logic_error if an invalid improvability screening mode is
 * specified.
 */
template <class T_Variable, class T_Expression>
TabuSearchResult<T_Variable, T_Expression> solve(
    model::Model<T_Variable, T_Expression>* a_model_ptr,        //
    const option::Option&                   a_OPTION,           //
    const std::vector<multi_array::ValueProxy<T_Variable>>&     //
        a_INITIAL_VARIABLE_VALUE_PROXIES,                       //
    const solution::IncumbentHolder<T_Variable, T_Expression>&  //
        a_INCUMBENT_HOLDER,                                     //
    const Memory a_MEMORY) {
    /**
     * Define type aliases.
     */
    using Model_T  = model::Model<T_Variable, T_Expression>;
    using Result_T = TabuSearchResult<T_Variable, T_Expression>;
    using IncumbentHolder_T =
        solution::IncumbentHolder<T_Variable, T_Expression>;
    using Move_T    = neighborhood::Move<T_Variable, T_Expression>;
    using MoveScore = TabuSearchMoveScore;

    /**
     * Start to measure computational time.
     */
    utility::TimeKeeper time_keeper;

    /**
     * Copy arguments as local variables.
     */
    Model_T*          model_ptr        = a_model_ptr;
    option::Option    option           = a_OPTION;
    Memory            memory           = a_MEMORY;
    IncumbentHolder_T incumbent_holder = a_INCUMBENT_HOLDER;

    /**
     * Reset the local augmented incumbent.
     */
    incumbent_holder.reset_local_augmented_incumbent();

    /**
     * Prepare a random generator, which is used for shuffling moves.
     */
    std::mt19937 get_rand_mt(option.tabu_search.seed);

    /**
     * Initialize the solution and update the model.
     */
    model_ptr->import_variable_values(a_INITIAL_VARIABLE_VALUE_PROXIES);
    model_ptr->update();

    solution::SolutionScore current_solution_score  = model_ptr->evaluate({});
    solution::SolutionScore previous_solution_score = current_solution_score;

    int update_status = incumbent_holder.try_update_incumbent(
        model_ptr, current_solution_score);
    int total_update_status =
        solution::IncumbentHolderConstant::STATUS_NO_UPDATED;

    /**
     * Reset the last update iterations.
     */
    memory.reset_last_update_iterations();

    /**
     * Set up the tabu tenure and related parameters.
     */
    int original_tabu_tenure =
        std::min(option.tabu_search.initial_tabu_tenure,
                 model_ptr->number_of_mutable_variables());
    int tabu_tenure = original_tabu_tenure;

    double intensity_previous = 0.0;
    double intensity_current  = 0.0;

    int intensity_increase_count = 0;
    int intensity_decrease_count = 0;

    int last_tabu_tenure_updated_iteration = 0;

    /**
     * Prepare feasible solutions holder.
     */
    std::vector<solution::SparseSolution<T_Variable, T_Expression>>
        feasible_solutions;

    /**
     * Reset the variable improvability.
     */
    model_ptr->reset_variable_objective_improvability();
    model_ptr->reset_variable_feasibility_improvability();

    /**
     * Prepare other local variables.
     */
    int number_of_all_neighborhoods         = 0;
    int number_of_feasible_neighborhoods    = 0;
    int number_of_permissible_neighborhoods = 0;
    int number_of_improvable_neighborhoods  = 0;

    std::vector<solution::SolutionScore> trial_solution_scores;
    std::vector<MoveScore>               trial_move_scores;
    std::vector<double>                  total_scores;
    std::vector<double>                  infeasible_local_penalties;

    int last_local_augmented_incumbent_update_iteration  = -1;
    int last_global_augmented_incumbent_update_iteration = -1;
    int last_feasible_incumbent_update_iteration         = -1;

    int local_augmented_incumbent_update_count = 0;

    TabuSearchTerminationStatus termination_status =
        TabuSearchTerminationStatus::ITERATION_OVER;

    neighborhood::Move<T_Variable, T_Expression> previous_move;
    neighborhood::Move<T_Variable, T_Expression> current_move;

    bool is_few_permissible_neighborhood = false;
    bool is_found_new_feasible_solution  = false;

    double min_objective = current_solution_score.objective;
    double max_objective = current_solution_score.objective;

    double min_local_penalty = HUGE_VALF;
    if (!current_solution_score.is_feasible) {
        min_local_penalty = current_solution_score.local_penalty;
    }

    /**
     * Print the header of optimization progress table and print the initial
     * solution status.
     */
    utility::print_single_line(option.verbose >= option::verbose::Full);
    utility::print_message("Tabu Search starts.",
                           option.verbose >= option::verbose::Full);

    print_table_header(option.verbose >= option::verbose::Full);
    print_table_initial(model_ptr,               //
                        current_solution_score,  //
                        incumbent_holder,        //
                        option.verbose >= option::verbose::Full);

    /**
     * Iterations start.
     */
    int iteration = 0;
    while (true) {
        /**
         * Check the terminating condition.
         */
        double elapsed_time = time_keeper.clock();
        if (elapsed_time > option.tabu_search.time_max) {
            termination_status = TabuSearchTerminationStatus::TIME_OVER;
            break;
        }

        if (elapsed_time + option.tabu_search.time_offset > option.time_max) {
            termination_status = TabuSearchTerminationStatus::TIME_OVER;
            break;
        }

        if (iteration >= option.tabu_search.iteration_max) {
            termination_status = TabuSearchTerminationStatus::ITERATION_OVER;
            break;
        }

        if (incumbent_holder.feasible_incumbent_objective() <=
            option.target_objective_value) {
            termination_status = TabuSearchTerminationStatus::REACH_TARGET;
            break;
        }

        if (local_augmented_incumbent_update_count >
            option.tabu_search.pruning_rate_threshold *
                option.tabu_search.iteration_max) {
            termination_status = TabuSearchTerminationStatus::EARLY_STOP;
            break;
        }

        /**
         * Update the moves.
         */
        bool is_enabled_improvability_screening =
            (option.improvability_screening_mode !=
             option::improvability_screening_mode::Off);

        bool accept_all                    = true;
        bool accept_objective_improvable   = true;
        bool accept_feasibility_improvable = true;

        if (model_ptr->is_linear() && is_enabled_improvability_screening) {
            /**
             * If the option improvability_screening_mode is not None,
             * only improvable moves will be generated.
             */
            auto changed_variable_ptrs = utility::to_vector(
                neighborhood::related_variable_ptrs(current_move));

            if (iteration == 0) {
                model_ptr->update_variable_objective_improvability();
            } else {
                model_ptr->update_variable_objective_improvability(
                    changed_variable_ptrs);
            }

            switch (option.improvability_screening_mode) {
                case option::improvability_screening_mode::Soft: {
                    if (model_ptr->is_feasible()) {
                        accept_all                    = false;
                        accept_objective_improvable   = true;
                        accept_feasibility_improvable = false;
                    } else {
                        model_ptr->reset_variable_feasibility_improvability();
                        model_ptr->update_variable_feasibility_improvability();

                        accept_all                    = false;
                        accept_objective_improvable   = true;
                        accept_feasibility_improvable = true;
                    }
                    break;
                }
                case option::improvability_screening_mode::Aggressive: {
                    if (model_ptr->is_feasible()) {
                        accept_all                    = false;
                        accept_objective_improvable   = true;
                        accept_feasibility_improvable = false;
                    } else {
                        model_ptr->reset_variable_feasibility_improvability();
                        model_ptr->update_variable_feasibility_improvability();

                        accept_all                    = false;
                        accept_objective_improvable   = false;
                        accept_feasibility_improvable = true;
                    }
                    break;
                }
                case option::improvability_screening_mode::Intensive: {
                    if (model_ptr->is_feasible()) {
                        accept_all                    = false;
                        accept_objective_improvable   = true;
                        accept_feasibility_improvable = false;
                    } else {
                        auto changed_constraint_ptrs = utility::to_vector(
                            current_move.related_constraint_ptrs);

                        if (iteration == 0) {
                            model_ptr
                                ->reset_variable_feasibility_improvability();
                            model_ptr
                                ->update_variable_feasibility_improvability();
                        } else {
                            model_ptr->reset_variable_feasibility_improvability(
                                changed_constraint_ptrs);
                            model_ptr
                                ->update_variable_feasibility_improvability(
                                    changed_constraint_ptrs);
                        }

                        accept_all                    = false;
                        accept_objective_improvable   = false;
                        accept_feasibility_improvable = true;
                    }
                    break;
                }
                default: {
                    throw std::logic_error(utility::format_error_location(
                        __FILE__, __LINE__, __func__,
                        "The specified improvability screening mode is "
                        "invalid."));
                }
            }
        }

        model_ptr->neighborhood().update_moves(
            accept_all,                     //
            accept_objective_improvable,    //
            accept_feasibility_improvable,  //
            option.is_enabled_parallel_neighborhood_update);

        if (option.tabu_search.is_enabled_shuffle) {
            model_ptr->neighborhood().shuffle_moves(&get_rand_mt);
        }

        const auto& trial_move_ptrs = model_ptr->neighborhood().move_ptrs();
        int         number_of_moves = trial_move_ptrs.size();

        if (option.tabu_search.is_enabled_move_curtail) {
            number_of_moves = static_cast<int>(
                floor(option.tabu_search.move_preserve_rate * number_of_moves));
        }

        /**
         * If the number of the moves is zero, the tabu search iterations will
         * be terminated.
         */
        if (number_of_moves == 0) {
            if (model_ptr->is_linear() && model_ptr->is_feasible()) {
                /**
                 * If the current solution is feasible and there is no
                 * improvable solution, the solution should be an optimum.
                 * It can happen for decomp2 instance in MIPLIB 2017.
                 */
                termination_status = TabuSearchTerminationStatus::OPTIMAL;

                for (const auto& variable_ptr :
                     model_ptr->variable_reference().variable_ptrs) {
                    if (variable_ptr->is_objective_improvable()) {
                        termination_status =
                            TabuSearchTerminationStatus::NO_MOVE;
                        break;
                    }
                }
                break;
            } else {
                termination_status = TabuSearchTerminationStatus::NO_MOVE;
                break;
            }
        }

        /**
         * Reserve elements for vectors by the number of the moves. This step is
         * required for each iteration because the number of the moves can be
         * changed.
         */
        trial_solution_scores.resize(number_of_moves);
        trial_move_scores.resize(number_of_moves);
        total_scores.resize(number_of_moves);

#ifdef _OPENMP
#pragma omp parallel for if (option.is_enabled_parallel_evaluation) \
    schedule(static)
#endif
        for (auto i = 0; i < number_of_moves; i++) {
            /**
             * The neighborhood solutions will be evaluated in parallel by fast
             * or ordinary(slow) evaluation methods.
             */
#ifndef _MPS_SOLVER
            if (model_ptr->is_enabled_fast_evaluation()) {
#endif
                model_ptr->evaluate(&trial_solution_scores[i],  //
                                    *trial_move_ptrs[i],        //
                                    current_solution_score);
#ifndef _MPS_SOLVER
            } else {
                model_ptr->evaluate(&trial_solution_scores[i],  //
                                    *trial_move_ptrs[i]);
            }
#endif

            evaluate_move(&trial_move_scores[i],  //
                          *trial_move_ptrs[i],    //
                          iteration,              //
                          memory,                 //
                          option,                 //
                          tabu_tenure);

            total_scores[i] =
                trial_solution_scores[i].local_augmented_objective +
                trial_move_scores[i].frequency_penalty;

            /**
             * If the move is "tabu", it will be set lower priorities in
             * selecting a move for the next solution.
             */
            if (!trial_move_scores[i].is_permissible) {
                total_scores[i] += constant::LARGE_VALUE_50;
            }

            /**
             * If the move is a special neighborhood move, it must improve
             * the objective or the feasibility.
             */
            if (trial_move_ptrs[i]->is_special_neighborhood_move &&
                !(trial_solution_scores[i].is_objective_improvable ||
                  trial_solution_scores[i].is_feasibility_improvable)) {
                total_scores[i] += constant::LARGE_VALUE_100;
            }
        }

        /**
         * Select moves for the next solution.
         */
        int argmin_global_augmented_objective = std::distance(
            trial_solution_scores.begin(),
            min_element(trial_solution_scores.begin(),
                        trial_solution_scores.end(),
                        [](const auto& a_FIRST, const auto& a_SECOND) {
                            return a_FIRST.global_augmented_objective <
                                   a_SECOND.global_augmented_objective;
                        }));

        int argmin_total_score = utility::argmin(total_scores);

        int  selected_index = 0;
        bool is_aspirated   = false;

        if (iteration < option.tabu_search.number_of_initial_modification) {
            /**
             * For diversification, the move for next solution will be randomly
             * selected for initial several iteration.
             */
            selected_index = get_rand_mt() % number_of_moves;
        } else {
            /**
             * The move for next solution will be determined by evaluations of
             * solutions and moves after the initial modifications.
             */
            selected_index = argmin_total_score;

            /**
             * A move which improves the augmented incumbent solution can be
             * accepted (optional).
             */
            if (option.tabu_search.ignore_tabu_if_global_incumbent) {
                if (trial_solution_scores[argmin_global_augmented_objective]
                            .global_augmented_objective +
                        constant::EPSILON <
                    incumbent_holder.global_augmented_incumbent_objective()) {
                    selected_index = argmin_global_augmented_objective;
                    if (!trial_move_scores[selected_index].is_permissible) {
                        is_aspirated = true;
                    }
                }
            }
        }

        /**
         * Backup the previous solution score and move.
         */
        previous_solution_score = current_solution_score;
        previous_move           = current_move;

        /**
         * Update the model by the selected move.
         */
        Move_T* move_ptr = trial_move_ptrs[selected_index];
        model_ptr->update(*move_ptr);

        /**
         * Update the current solution score and move.
         */
        current_solution_score = trial_solution_scores[selected_index];
        current_move           = *move_ptr;

        /**
         * Track the observed objective range and the minimum local penalty,
         * which are used to compute result.objective_constraint_rate.
         * NOTE: max_objective previously used std::min here, which kept the
         * minimum instead of the maximum; it must be std::max.
         */
        min_objective =
            std::min(min_objective, current_solution_score.objective);
        max_objective =
            std::max(max_objective, current_solution_score.objective);

        if (!current_solution_score.is_feasible) {
            min_local_penalty = std::min(min_local_penalty,
                                         current_solution_score.local_penalty);
        }

        /**
         * Update the status.
         */
        update_status = incumbent_holder.try_update_incumbent(
            model_ptr, current_solution_score);
        total_update_status = update_status | total_update_status;

        if (current_solution_score.is_feasible) {
            is_found_new_feasible_solution = true;
        }

        /**
         * Store the current feasible solution.
         */
        if (option.is_enabled_store_feasible_solutions &&
            current_solution_score.is_feasible) {
            feasible_solutions.push_back(model_ptr->export_plain_solution());
        }

        /**
         * Update the memory.
         */
        int random_width = static_cast<int>(
            option.tabu_search.tabu_tenure_randomize_rate * tabu_tenure);
        memory.update(*move_ptr,     //
                      iteration,     //
                      random_width,  //
                      &get_rand_mt);

        /**
         * To avoid cycling, each special neighborhood can be used only once in
         * one tabu search loop.
         */
        if (move_ptr->is_special_neighborhood_move) {
            move_ptr->is_available = false;
        }

        /**
         * Calculate various statistics for logging.
         */
        if (update_status & solution::IncumbentHolderConstant::
                                STATUS_LOCAL_AUGMENTED_INCUMBENT_UPDATE) {
            last_local_augmented_incumbent_update_iteration = iteration;
        }

        if (update_status & solution::IncumbentHolderConstant::
                                STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) {
            last_global_augmented_incumbent_update_iteration = iteration;
        }

        if (update_status & solution::IncumbentHolderConstant::
                                STATUS_FEASIBLE_INCUMBENT_UPDATE) {
            last_feasible_incumbent_update_iteration = iteration;
        }

        /**
         * For pruning, count updating of the local augmented incumbent without
         * global augmented incumbent improvement.
         */
        if (update_status == solution::IncumbentHolderConstant::
                                 STATUS_LOCAL_AUGMENTED_INCUMBENT_UPDATE) {
            local_augmented_incumbent_update_count++;
        } else if (update_status &
                   solution::IncumbentHolderConstant::
                       STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) {
            local_augmented_incumbent_update_count = 0;
        }

        /**
         * Calculate the number of moves for each type.
         */
        number_of_all_neighborhoods = number_of_moves;
        if (iteration % std::max(option.tabu_search.log_interval, 1) == 0 ||
            update_status > 0) {
            number_of_feasible_neighborhoods    = 0;
            number_of_permissible_neighborhoods = 0;
            number_of_improvable_neighborhoods  = 0;

            for (const auto& score : trial_solution_scores) {
                if (score.is_feasible) {
                    number_of_feasible_neighborhoods++;
                }
                if (score.is_objective_improvable ||
                    score.is_feasibility_improvable) {
                    number_of_improvable_neighborhoods++;
                }
            }

            for (const auto& score : trial_move_scores) {
                if (score.is_permissible) {
                    number_of_permissible_neighborhoods++;
                }
            }

            if (number_of_permissible_neighborhoods == 0) {
                is_few_permissible_neighborhood = true;
            }
        } else {
            bool is_few_permissible_neighborhood_temp = true;
            for (const auto& score : trial_move_scores) {
                if (score.is_permissible) {
                    is_few_permissible_neighborhood_temp = false;
                    break;
                }
            }
            if (is_few_permissible_neighborhood_temp) {
                is_few_permissible_neighborhood = true;
            }
        }

        /**
         * Register a chain move.
         */
        if (iteration > 0 && option.is_enabled_chain_move) {
            if ((previous_move.sense == neighborhood::MoveSense::Binary &&
                 current_move.sense == neighborhood::MoveSense::Binary &&
                 previous_move.alterations.front().second !=
                     current_move.alterations.front().second) ||
                (previous_move.sense == neighborhood::MoveSense::Chain &&
                 current_move.sense == neighborhood::MoveSense::Chain)) {
                Move_T chain_move;
                if (previous_move.alterations.front().first <
                    current_move.alterations.front().first)
                    chain_move = previous_move + current_move;
                else {
                    chain_move = current_move + previous_move;
                }

                if (chain_move.overlap_rate >
                        option.chain_move_overlap_rate_threshold &&
                    !neighborhood::has_duplicate_variable(chain_move)) {
                    auto back_chain_move = chain_move;
                    for (auto&& alteration : back_chain_move.alterations) {
                        alteration.second = 1 - alteration.second;
                    }
                    model_ptr->neighborhood().chain().register_move(chain_move);
                    model_ptr->neighborhood().chain().register_move(
                        back_chain_move);
                }
            }
        }

        if (option.tabu_search.is_enabled_automatic_tabu_tenure_adjustment) {
            if ((update_status &
                 solution::IncumbentHolderConstant::
                     STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) &&
                tabu_tenure > original_tabu_tenure) {
                /**
                 * The tabu tenure will be reverted to the original value if it
                 * has been increased and the global incumbent is updated.
                 */
                tabu_tenure = original_tabu_tenure;
                last_tabu_tenure_updated_iteration = iteration;
                intensity_decrease_count           = 0;
                intensity_increase_count           = 0;
                utility::print_debug("Tabu tenure reverted: " +
                                         std::to_string(tabu_tenure) + ".",
                                     option.verbose >= option::verbose::Debug);
            } else if ((iteration - last_tabu_tenure_updated_iteration) %
                           (tabu_tenure + 1) ==
                       0) {
                /**
                 * The intensity of searching will be computed with the interval
                 * of tabu_tenure+1. The tabu tenure will be increased if the
                 * intensity has grown up, and decreased if the intensity has
                 * been reduced.
                 */
                intensity_previous = intensity_current;
                intensity_current  = memory.intensity();

                if (intensity_current > intensity_previous) {
                    intensity_increase_count++;
                    intensity_decrease_count = 0;

                    if (intensity_increase_count >
                        option.tabu_search.intensity_increase_count_threshold) {
                        intensity_increase_count = 0;
                        tabu_tenure =
                            std::min(tabu_tenure + 1,
                                     model_ptr->number_of_mutable_variables());
                        last_tabu_tenure_updated_iteration = iteration;
                        utility::print_debug(
                            "Tabu tenure increased: " +
                                std::to_string(tabu_tenure) + ".",
                            option.verbose >= option::verbose::Debug);
                    }
                } else {
                    intensity_decrease_count++;
                    intensity_increase_count = 0;

                    if (intensity_decrease_count >
                        option.tabu_search.intensity_decrease_count_threshold) {
                        intensity_decrease_count = 0;
                        tabu_tenure =
                            std::max(tabu_tenure - 1,
                                     std::max(1, original_tabu_tenure / 2));
                        last_tabu_tenure_updated_iteration = iteration;

                        utility::print_debug(
                            "Tabu tenure decreased: " +
                                std::to_string(tabu_tenure) + ".",
                            option.verbose >= option::verbose::Debug);
                    }
                }
            }
        }

        /**
         * Print the optimization progress.
         */
        if (iteration % std::max(option.tabu_search.log_interval, 1) == 0 ||
            update_status > 0) {
            print_table_body(model_ptr,                                  //
                             iteration,                                  //
                             current_move.is_special_neighborhood_move,  //
                             number_of_all_neighborhoods,                //
                             number_of_feasible_neighborhoods,           //
                             number_of_permissible_neighborhoods,        //
                             number_of_improvable_neighborhoods,         //
                             current_solution_score,                     //
                             update_status,                              //
                             incumbent_holder,                           //
                             is_aspirated,                               //
                             option.verbose >= option::verbose::Full);
        }

        if (option.tabu_search.is_enabled_automatic_break) {
            /**
             * If the local penalty is sufficiently larger than objective
             * sensitivity, the current loop will be terminated and the
             * local penalty coefficients will be adjusted.
             */
            constexpr int    ITERATION_MIN = 10;
            constexpr double MARGIN        = 100.0;

            if (iteration > ITERATION_MIN &&
                current_solution_score.is_feasible) {
                infeasible_local_penalties.clear();
                for (const auto& score : trial_solution_scores) {
                    if (!score.is_feasible) {
                        infeasible_local_penalties.push_back(
                            score.local_penalty);
                    }
                }

                if (infeasible_local_penalties.size() > 0) {
                    auto argminmax_objective_sensitivity_score_ptr =
                        std::minmax_element(
                            trial_solution_scores.begin(),
                            trial_solution_scores.end(),
                            [](const auto& a_FIRST, const auto& a_SECOND) {
                                return a_FIRST.objective_improvement <
                                       a_SECOND.objective_improvement;
                            });

                    double max_objective_sensitivity =
                        std::max(argminmax_objective_sensitivity_score_ptr
                                     .second->objective_improvement,
                                 -argminmax_objective_sensitivity_score_ptr
                                      .first->objective_improvement);

                    if (max_objective_sensitivity * MARGIN <
                        utility::min(infeasible_local_penalties)) {
                        termination_status =
                            TabuSearchTerminationStatus::EARLY_STOP;
                        break;
                    }
                }
            }
        }
        iteration++;
    }

    /**
     * Print the footer of the optimization progress table.
     */
    print_table_footer(option.verbose >= option::verbose::Full);

    /**
     * Prepare the result.
     */
    Result_T result;
    result.incumbent_holder     = incumbent_holder;
    result.memory               = memory;
    result.total_update_status  = total_update_status;
    result.tabu_tenure          = tabu_tenure;
    result.number_of_iterations = iteration;

    result.last_local_augmented_incumbent_update_iteration =
        last_local_augmented_incumbent_update_iteration;
    result.last_global_augmented_incumbent_update_iteration =
        last_global_augmented_incumbent_update_iteration;
    result.last_feasible_incumbent_update_iteration =
        last_feasible_incumbent_update_iteration;

    result.is_few_permissible_neighborhood = is_few_permissible_neighborhood;
    result.is_found_new_feasible_solution  = is_found_new_feasible_solution;

    auto abs_max_objective = std::max(fabs(max_objective), fabs(min_objective));

    result.objective_constraint_rate =
        std::max(1.0, std::max(abs_max_objective,  //
                               max_objective - min_objective)) /
        std::max(1.0, min_local_penalty);

    result.termination_status = termination_status;
    result.feasible_solutions = feasible_solutions;

    return result;
}
} // namespace tabu_search
} // namespace solver
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
convolution_1x1_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack a 1x1 convolution weight blob from the plain inch-outch layout into
// the interleaved 4b-4a-inch/4a-outch/4b layout consumed by the pack4 bf16
// sgemm kernels, converting every weight from fp32 to bf16 along the way.
static void conv1x1s1_sgemm_transform_kernel_pack4_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
#if __aarch64__
    // aarch64 gemm tiles consume 8 output channels at a time, so reserve
    // room for outch/8 wide tiles plus one 4-channel tail tile if needed.
    kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16);
#else
    kernel_tm_pack4.create(1, inch / 4, outch / 4, (size_t)2u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // Interleave 8 output channels per tile: for each group of 4 input
    // channels, emit 4 runs of 8 bf16 weights (one weight per out channel).
    for (; q + 7 < outch; q += 8)
    {
        const float* rows[8];
        for (int j = 0; j < 8; j++)
            rows[j] = (const float*)kernel + (q + j) * inch;

        unsigned short* g0 = kernel_tm_pack4.channel(q / 8);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 8; j++)
                    g0[i * 8 + j] = float32_to_bfloat16(rows[j][i]);
            }

            for (int j = 0; j < 8; j++)
                rows[j] += 4;
            g0 += 32;
        }
    }
#endif // __aarch64__
    // Interleave the remaining output channels 4 at a time.
    for (; q + 3 < outch; q += 4)
    {
        const float* rows[4];
        for (int j = 0; j < 4; j++)
            rows[j] = (const float*)kernel + (q + j) * inch;

#if __aarch64__
        unsigned short* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        unsigned short* g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int p = 0; p + 3 < inch; p += 4)
        {
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                    g0[i * 4 + j] = float32_to_bfloat16(rows[j][i]);
            }

            for (int j = 0; j < 4; j++)
                rows[j] += 4;
            g0 += 16;
        }
    }
}
static void conv1x1s1_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2, inch, size / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2, inch, size / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
unsigned short* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.u16 {d0-d1}, [%0 :128] \n"
"vst1.u16 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w2233_01
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < size; i += 2)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r0 r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < size; i++)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r0
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p / 2 + p % 2);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // w0123_0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < size; i += 8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p / 2 + p % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vshrn.u32 d20, q12, #16 \n"
"vshrn.u32 d21, q13, #16 \n"
"vshrn.u32 d22, q14, #16 \n"
"vshrn.u32 d23, q15, #16 \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p / 2 + p % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vst1.u16 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < size; i += 2)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p / 2 + p % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r0 r1
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.u16 {d4-d5}, [%2 :128]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p / 2 + p % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r0
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const short bias0 = bias ? bias[p] : 0.f;
//
// unsigned short* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// short sum = bias0;
//
// const unsigned short* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const unsigned short* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const unsigned short* r0 = bottom_blob.channel(p);
unsigned short* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0 + 8);
uint16x4_t _v2 = vld1_u16(r0 + 16);
uint16x4_t _v3 = vld1_u16(r0 + 24);
uint16x8_t _v01 = vcombine_u16(_v0, _v1);
uint16x8_t _v23 = vcombine_u16(_v2, _v3);
vst1q_u16(outptr, _v01);
vst1q_u16(outptr + 8, _v23);
r0 += 32;
outptr += 16;
}
for (; j + 1 < outw; j += 2)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0 + 8);
uint16x8_t _v = vcombine_u16(_v0, _v1);
vst1q_u16(outptr, _v);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
uint16x4_t _v = vld1_u16(r0);
vst1_u16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_unaryop__identity_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint32_uint64
// op(A') function: GB_tran__identity_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// type of A's entries
#define GB_ATYPE \
uint64_t
// type of C's entries
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// entry of C at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator entrywise: Cx [p] = (uint32_t) Ax [p].
// Safe when Cx and Ax alias, since each entry p is read and written
// independently.
GrB_Info GB_unop__identity_uint32_uint64
(
uint32_t *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time (see GB_control.h)
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = op ((uint32_t) Ax [p])
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The loop body lives in the GB_unaryop_transpose.c template, which is
// instantiated here with this file's GB_CAST_OP / GB_ATYPE / GB_CTYPE macros.
GrB_Info GB_tran__identity_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts (see template)
GBI_single_iterator Iter, // iterator over A's vectors
const int64_t *GB_RESTRICT A_slice, // partition of A across slices
int naslice // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pixel.c | /*****************************************************************************
* pixel.c: h264 encoder
*****************************************************************************
* Copyright (C) 2003-2008 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Laurent Aimar <fenrir@via.ecp.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include "common.h"
#ifdef HAVE_MMX
# include "x86/pixel.h"
#endif
#ifdef ARCH_PPC
# include "ppc/pixel.h"
#endif
#ifdef ARCH_UltraSparc
# include "sparc/pixel.h"
#endif
/****************************************************************************
* pixel_sad_WxH
****************************************************************************/
/* generic C SAD: sum of absolute differences over an lx x ly block of two
strided pixel planes */
#define PIXEL_SAD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1, \
uint8_t *pix2, int i_stride_pix2 ) \
{ \
int i_sum = 0; \
int x, y; \
for( y = 0; y < ly; y++ ) \
{ \
for( x = 0; x < lx; x++ ) \
{ \
i_sum += abs( pix1[x] - pix2[x] ); \
} \
pix1 += i_stride_pix1; \
pix2 += i_stride_pix2; \
} \
return i_sum; \
}
/* instantiate for every partition size used by the encoder */
PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
PIXEL_SAD_C( x264_pixel_sad_16x8, 16, 8 )
PIXEL_SAD_C( x264_pixel_sad_8x16, 8, 16 )
PIXEL_SAD_C( x264_pixel_sad_8x8, 8, 8 )
PIXEL_SAD_C( x264_pixel_sad_8x4, 8, 4 )
PIXEL_SAD_C( x264_pixel_sad_4x8, 4, 8 )
PIXEL_SAD_C( x264_pixel_sad_4x4, 4, 4 )
/****************************************************************************
* pixel_ssd_WxH
****************************************************************************/
/* generic C SSD: sum of squared differences over an lx x ly block of two
strided pixel planes */
#define PIXEL_SSD_C( name, lx, ly ) \
static int name( uint8_t *pix1, int i_stride_pix1, \
uint8_t *pix2, int i_stride_pix2 ) \
{ \
int i_sum = 0; \
int x, y; \
for( y = 0; y < ly; y++ ) \
{ \
for( x = 0; x < lx; x++ ) \
{ \
int d = pix1[x] - pix2[x]; \
i_sum += d*d; \
} \
pix1 += i_stride_pix1; \
pix2 += i_stride_pix2; \
} \
return i_sum; \
}
/* instantiate for every partition size used by the encoder */
PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
PIXEL_SSD_C( x264_pixel_ssd_16x8, 16, 8 )
PIXEL_SSD_C( x264_pixel_ssd_8x16, 8, 16 )
PIXEL_SSD_C( x264_pixel_ssd_8x8, 8, 8 )
PIXEL_SSD_C( x264_pixel_ssd_8x4, 8, 4 )
PIXEL_SSD_C( x264_pixel_ssd_4x8, 4, 8 )
PIXEL_SSD_C( x264_pixel_ssd_4x4, 4, 4 )
/* SSD over an arbitrary i_width x i_height region: uses the table's block
functions (16x16 / 8x16 / 8x8) for the bulk and falls back to per-pixel
accumulation for the ragged right/bottom edges when the dimensions are not
multiples of 8. */
int64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
{
int64_t i_ssd = 0;
int x, y;
/* 16-wide blocks are only used when both planes and strides are 16-aligned */
int align = !(((long)pix1 | (long)pix2 | i_pix1 | i_pix2) & 15);
#define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
pix2 + y*i_pix2 + x, i_pix2 );
for( y = 0; y < i_height-15; y += 16 )
{
x = 0;
if( align )
for( ; x < i_width-15; x += 16 )
SSD(PIXEL_16x16);
for( ; x < i_width-7; x += 8 )
SSD(PIXEL_8x16);
}
/* NOTE: y is deliberately carried over from the loop above; this handles a
remaining strip of 8..15 rows with 8x8 blocks */
if( y < i_height-7 )
for( x = 0; x < i_width-7; x += 8 )
SSD(PIXEL_8x8);
#undef SSD
#define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
/* ragged right edge */
if( i_width % 8 != 0 )
{
for( y = 0; y < (i_height & ~7); y++ )
for( x = i_width & ~7; x < i_width; x++ )
SSD1;
}
/* ragged bottom edge (full width, including the corner) */
if( i_height % 8 != 0 )
{
for( y = i_height & ~7; y < i_height; y++ )
for( x = 0; x < i_width; x++ )
SSD1;
}
#undef SSD1
return i_ssd;
}
/****************************************************************************
* pixel_var_wxh
****************************************************************************/
/* variance of a w x w block; also returns the pixel sum through *sad.
shift = log2(w*w), so (sum*sum >> shift) is the squared-mean term */
#define PIXEL_VAR_C( name, w, shift ) \
static int name( uint8_t *pix, int i_stride, uint32_t *sad ) \
{ \
uint32_t var = 0, sum = 0, sqr = 0; \
int x, y; \
for( y = 0; y < w; y++ ) \
{ \
for( x = 0; x < w; x++ ) \
{ \
sum += pix[x]; \
sqr += pix[x] * pix[x]; \
} \
pix += i_stride; \
} \
var = sqr - (sum * sum >> shift); \
*sad = sum; \
return var; \
}
PIXEL_VAR_C( x264_pixel_var_16x16, 16, 8 )
PIXEL_VAR_C( x264_pixel_var_8x8, 8, 6 )
/* 4-point Hadamard butterfly (unnormalized): d0..d3 = H4 * (s0..s3) */
#define HADAMARD4(d0,d1,d2,d3,s0,s1,s2,s3) {\
int t0 = s0 + s1;\
int t1 = s0 - s1;\
int t2 = s2 + s3;\
int t3 = s2 - s3;\
d0 = t0 + t2;\
d2 = t0 - t2;\
d1 = t1 + t3;\
d3 = t1 - t3;\
}
/****************************************************************************
* pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
****************************************************************************/
/* Plain-C SATD: sum of absolute values of the 4x4 Hadamard-transformed
 * residual, accumulated over every 4x4 sub-block of an i_width x i_height
 * area.  Returns the accumulated total divided by 2 (the unnormalized
 * transform doubles the scale).
 * FIX: removed the "#pragma omp parallel for" that was attached to the y
 * loop.  Each iteration advances the shared pix1/pix2 pointers and adds to
 * i_satd with no reduction or privatization, so parallel execution was a
 * data race producing nondeterministic results. */
static int pixel_satd_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2, int i_width, int i_height )
{
    int16_t tmp[4][4];
    int x, y;
    int i_satd = 0;

    for( y = 0; y < i_height; y += 4 )
    {
        for( x = 0; x < i_width; x += 4 )
        {
            int i;
            uint8_t *p1 = pix1+x, *p2 = pix2+x;
            /* horizontal pass: transform each row of the 4x4 residual */
            for( i=0; i<4; i++, p1+=i_pix1, p2+=i_pix2 )
            {
                int a0 = p1[0] - p2[0];
                int a1 = p1[1] - p2[1];
                int a2 = p1[2] - p2[2];
                int a3 = p1[3] - p2[3];
                HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
            }
            /* vertical pass: transform each column, accumulate |coeff| */
            for( i=0; i<4; i++ )
            {
                int a0,a1,a2,a3;
                HADAMARD4( a0,a1,a2,a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
                i_satd += abs(a0) + abs(a1) + abs(a2) + abs(a3);
            }
        }
        pix1 += 4 * i_pix1;
        pix2 += 4 * i_pix2;
    }

    return i_satd / 2;
}
/* fixed-size wrappers around the generic SATD */
#define PIXEL_SATD_C( name, width, height ) \
static int name( uint8_t *pix1, int i_stride_pix1, \
uint8_t *pix2, int i_stride_pix2 ) \
{ \
return pixel_satd_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
}
PIXEL_SATD_C( x264_pixel_satd_16x16, 16, 16 )
PIXEL_SATD_C( x264_pixel_satd_16x8, 16, 8 )
PIXEL_SATD_C( x264_pixel_satd_8x16, 8, 16 )
PIXEL_SATD_C( x264_pixel_satd_8x8, 8, 8 )
PIXEL_SATD_C( x264_pixel_satd_8x4, 8, 4 )
PIXEL_SATD_C( x264_pixel_satd_4x8, 4, 8 )
PIXEL_SATD_C( x264_pixel_satd_4x4, 4, 4 )
/****************************************************************************
* pixel_sa8d_WxH: sum of 8x8 Hadamard transformed differences
****************************************************************************/
/* one 8-point Hadamard pass built from two 4-point butterflies; reads
elements through SRC(i) and writes through DST(i,expr), which the caller
#defines separately for the horizontal and vertical passes */
#define SA8D_1D {\
int b0,b1,b2,b3,b4,b5,b6,b7;\
HADAMARD4( b0,b1,b2,b3, SRC(0), SRC(1), SRC(2), SRC(3) );\
HADAMARD4( b4,b5,b6,b7, SRC(4), SRC(5), SRC(6), SRC(7) );\
DST(0, b0 + b4);\
DST(4, b0 - b4);\
DST(1, b1 + b5);\
DST(5, b1 - b5);\
DST(2, b2 + b6);\
DST(6, b2 - b6);\
DST(3, b3 + b7);\
DST(7, b3 - b7);\
}
/* Plain-C SA8D: sum of absolute values of the 8x8 Hadamard-transformed
 * residual, over every 8x8 sub-block of an i_width x i_height area.
 * Returns the raw (unnormalized) total; the PIXEL_SA8D_C wrappers divide
 * by 4 with rounding.
 * FIX: removed the "#pragma omp parallel for" on the y loop.  Each
 * iteration mutates the shared pix1/pix2 pointers and accumulates into
 * i_satd without a reduction, so the parallel version was a data race. */
static inline int pixel_sa8d_wxh( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2,
                                  int i_width, int i_height )
{
    int16_t diff[8][8];
    int i_satd = 0;
    int x, y;

    for( y = 0; y < i_height; y += 8 )
    {
        for( x = 0; x < i_width; x += 8 )
        {
            int i;
            uint8_t *p1 = pix1+x, *p2 = pix2+x;
            /* horizontal pass: 8-point transform of each residual row */
#define SRC(x)     a##x
#define DST(x,rhs) diff[i][x] = (rhs)
            for( i=0; i<8; i++, p1+=i_pix1, p2+=i_pix2 )
            {
                int a0 = p1[0] - p2[0];
                int a1 = p1[1] - p2[1];
                int a2 = p1[2] - p2[2];
                int a3 = p1[3] - p2[3];
                int a4 = p1[4] - p2[4];
                int a5 = p1[5] - p2[5];
                int a6 = p1[6] - p2[6];
                int a7 = p1[7] - p2[7];
                SA8D_1D
            }
#undef SRC
#undef DST
            /* vertical pass: transform each column, accumulate |coeff| */
#define SRC(x)     diff[x][i]
#define DST(x,rhs) i_satd += abs(rhs)
            for( i=0; i<8; i++ )
                SA8D_1D
#undef SRC
#undef DST
        }
        pix1 += 8 * i_pix1;
        pix2 += 8 * i_pix2;
    }
    return i_satd;
}
/* fixed-size SA8D wrappers; (total + 2) >> 2 is a rounded divide by 4 */
#define PIXEL_SA8D_C( width, height ) \
static int x264_pixel_sa8d_##width##x##height( uint8_t *pix1, int i_stride_pix1, \
uint8_t *pix2, int i_stride_pix2 ) \
{ \
return ( pixel_sa8d_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ) + 2 ) >> 2; \
}
PIXEL_SA8D_C( 16, 16 )
PIXEL_SA8D_C( 16, 8 )
PIXEL_SA8D_C( 8, 16 )
PIXEL_SA8D_C( 8, 8 )
/* Hadamard AC energy of one 8x8 block.  Returns two packed sums:
low 32 bits = sum of |coeffs| of the four 4x4 sub-transforms (sum4),
high 32 bits = sum of |coeffs| of the full 8x8 transform (sum8),
each with the DC coefficient subtracted. */
static uint64_t pixel_hadamard_ac( uint8_t *pix, int stride )
{
int16_t tmp[8][8];
int sum4=0, sum8=0;
int i;
/* first pass: 4-point transforms along each image row, stored transposed */
for( i=0; i<8; i++, pix+=stride )
{
HADAMARD4( tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i],
pix[0], pix[1], pix[2], pix[3] );
HADAMARD4( tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i],
pix[4], pix[5], pix[6], pix[7] );
}
/* second pass: finish the 4x4 transforms (accumulating sum4) and combine
the halves in place toward the 8x8 transform */
for( i=0; i<8; i++ )
{
int a0,a1,a2,a3,a4,a5,a6,a7;
HADAMARD4( a0,a1,a2,a3, tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3] );
sum4 += abs(a0) + abs(a1) + abs(a2) + abs(a3);
HADAMARD4( a4,a5,a6,a7, tmp[i][4], tmp[i][5], tmp[i][6], tmp[i][7] );
sum4 += abs(a4) + abs(a5) + abs(a6) + abs(a7);
tmp[i][0] = a0 + a4;
tmp[i][4] = a0 - a4;
tmp[i][1] = a1 + a5;
tmp[i][5] = a1 - a5;
tmp[i][2] = a2 + a6;
tmp[i][6] = a2 - a6;
tmp[i][3] = a3 + a7;
tmp[i][7] = a3 - a7;
}
/* final butterfly stage of the 8x8 transform, accumulating sum8 */
for( i=0; i<8; i++ )
{
sum8 += abs( tmp[0][i] + tmp[4][i] )
+ abs( tmp[0][i] - tmp[4][i] )
+ abs( tmp[1][i] + tmp[5][i] )
+ abs( tmp[1][i] - tmp[5][i] )
+ abs( tmp[2][i] + tmp[6][i] )
+ abs( tmp[2][i] - tmp[6][i] )
+ abs( tmp[3][i] + tmp[7][i] )
+ abs( tmp[3][i] - tmp[7][i] );
}
/* subtract the DC coefficient from both sums */
sum4 -= tmp[0][0]+tmp[4][0];
sum8 -= tmp[0][0]+tmp[4][0];
/* pack: sum8 in the high 32 bits, sum4 in the low 32 bits */
return ((uint64_t)sum8<<32) + sum4;
}
/* build w x h hadamard_ac from 8x8 tiles; the final shifts rescale the two
packed sums (high word >> 2, low word >> 1) while keeping the same
high/low packing as pixel_hadamard_ac */
#define HADAMARD_AC(w,h) \
static uint64_t x264_pixel_hadamard_ac_##w##x##h( uint8_t *pix, int stride )\
{\
uint64_t sum = pixel_hadamard_ac( pix, stride );\
if( w==16 )\
sum += pixel_hadamard_ac( pix+8, stride );\
if( h==16 )\
sum += pixel_hadamard_ac( pix+8*stride, stride );\
if( w==16 && h==16 )\
sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
}
HADAMARD_AC( 16, 16 )
HADAMARD_AC( 16, 8 )
HADAMARD_AC( 8, 16 )
HADAMARD_AC( 8, 8 )
/****************************************************************************
* pixel_sad_x4
****************************************************************************/
/* batch SAD: score one encode block (fenc, FENC_STRIDE) against 3 or 4
candidate positions that share a stride, writing scores[] in order */
#define SAD_X( size ) \
static void x264_pixel_sad_x3_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_sad_x4_##size( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
}
SAD_X( 16x16 )
SAD_X( 16x8 )
SAD_X( 8x16 )
SAD_X( 8x8 )
SAD_X( 8x4 )
SAD_X( 4x8 )
SAD_X( 4x4 )
/* additional instantiations over the VIS asm primitives */
#ifdef ARCH_UltraSparc
SAD_X( 16x16_vis )
SAD_X( 16x8_vis )
SAD_X( 8x16_vis )
SAD_X( 8x8_vis )
#endif
/****************************************************************************
* pixel_satd_x4
* no faster than single satd, but needed for satd to be a drop-in replacement for sad
****************************************************************************/
/* batch SATD wrappers mirroring SAD_X, so satd can be used as a drop-in
replacement for the sad_x3/sad_x4 interfaces */
#define SATD_X( size, cpu ) \
static void x264_pixel_satd_x3_##size##cpu( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, int i_stride, int scores[3] )\
{\
scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
}\
static void x264_pixel_satd_x4_##size##cpu( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1, uint8_t *pix2, uint8_t *pix3, int i_stride, int scores[4] )\
{\
scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
}
#define SATD_X_DECL6( cpu )\
SATD_X( 16x16, cpu )\
SATD_X( 16x8, cpu )\
SATD_X( 8x16, cpu )\
SATD_X( 8x8, cpu )\
SATD_X( 8x4, cpu )\
SATD_X( 4x8, cpu )
#define SATD_X_DECL7( cpu )\
SATD_X_DECL6( cpu )\
SATD_X( 4x4, cpu )
/* plain-C instantiations, plus asm-backed ones when x86 is available */
SATD_X_DECL7()
#ifdef HAVE_MMX
SATD_X_DECL7( _mmxext )
SATD_X_DECL6( _sse2 )
SATD_X_DECL7( _ssse3 )
SATD_X_DECL6( _ssse3_phadd )
#endif
/****************************************************************************
* structural similarity metric
****************************************************************************/
/* Compute SSIM partial sums for two horizontally adjacent 4x4 blocks.
 * sums[z] receives { s1 = sum(pix1), s2 = sum(pix2),
 *                    ss = sum(pix1^2)+sum(pix2^2), s12 = sum(pix1*pix2) }
 * for the block starting at x offset 4*z.
 * FIX: removed the "#pragma omp parallel for" on the z loop.  x, y, pix1
 * and pix2 are shared across iterations and each iteration advances the
 * pointers, so the two iterations raced on each other; two 4x4 blocks are
 * also far too little work to parallelize. */
static void ssim_4x4x2_core( const uint8_t *pix1, int stride1,
                             const uint8_t *pix2, int stride2,
                             int sums[2][4])
{
    int x, y, z;
    for(z=0; z<2; z++)
    {
        uint32_t s1=0, s2=0, ss=0, s12=0;
        for(y=0; y<4; y++)
            for(x=0; x<4; x++)
            {
                int a = pix1[x+y*stride1];
                int b = pix2[x+y*stride2];
                s1  += a;
                s2  += b;
                ss  += a*a;
                ss  += b*b;
                s12 += a*b;
            }
        sums[z][0] = s1;
        sums[z][1] = s2;
        sums[z][2] = ss;
        sums[z][3] = s12;
        /* advance to the second 4x4 block */
        pix1 += 4;
        pix2 += 4;
    }
}
/* Turn one set of 64-sample block statistics into a single SSIM term.
 * s1/s2 are the pixel sums of the two windows, ss the summed squares of
 * both, s12 the cross sum. */
static float ssim_end1( int s1, int s2, int ss, int s12 )
{
    /* stabilizing constants, scaled for 8-bit pixels over 64 samples */
    static const int ssim_c1 = (int)(.01*.01*255*255*64 + .5);
    static const int ssim_c2 = (int)(.03*.03*255*255*64*63 + .5);
    int vars  = ss*64 - s1*s1 - s2*s2;
    int covar = s12*64 - s1*s2;
    float num = (float)(2*s1*s2 + ssim_c1) * (float)(2*covar + ssim_c2);
    float den = (float)(s1*s1 + s2*s2 + ssim_c1) * (float)(vars + ssim_c2);
    return num / den;
}
/* Sum SSIM terms for `width` consecutive block positions, combining the two
 * rows of 4x4 partial sums (sum0, sum1) into overlapping windows of four
 * neighbouring blocks each. */
static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
{
    float ssim = 0.0;
    int i;
    for( i = 0; i < width; i++ )
    {
        int s1  = sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0];
        int s2  = sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1];
        int ss  = sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2];
        int s12 = sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3];
        ssim += ssim_end1( s1, s2, ss, s12 );
    }
    return ssim;
}
/* Accumulate SSIM over a width x height plane, one row of 4x4 block sums at
 * a time.  sum0 holds the current row of partial sums and sum1 the previous
 * one; each SSIM term combines a 2-row neighbourhood, hence the XCHG swap
 * and the `z <= y` catch-up loop.
 * FIX: removed the "#pragma omp parallel for" on the y loop.  The loop is
 * inherently serial: iteration y consumes the z counter and the sum0/sum1
 * buffers produced by iteration y-1, so parallel execution raced on all of
 * them. */
float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
                           uint8_t *pix1, int stride1,
                           uint8_t *pix2, int stride2,
                           int width, int height )
{
    int x, y, z;
    float ssim = 0.0;
    int (*sum0)[4] = x264_malloc(4 * (width/4+3) * sizeof(int));
    int (*sum1)[4] = x264_malloc(4 * (width/4+3) * sizeof(int));
    /* work in units of 4x4 blocks */
    width >>= 2;
    height >>= 2;
    z = 0;
    for( y = 1; y < height; y++ )
    {
        /* compute any block-sum rows not yet available (rows z..y) */
        for( ; z <= y; z++ )
        {
            XCHG( void*, sum0, sum1 );
            for( x = 0; x < width; x+=2 )
                pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
        }
        /* fold rows y-1 and y into SSIM terms, 4 positions per call */
        for( x = 0; x < width-1; x += 4 )
            ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
    }
    x264_free(sum0);
    x264_free(sum1);
    return ssim;
}
/****************************************************************************
* successive elimination
****************************************************************************/
/* Successive elimination with 4 DC terms: keep every candidate column whose
 * lower-bound cost (sum of |DC residuals| plus its motion-vector cost) is
 * below thresh.  Surviving x offsets are written to mvs; returns how many. */
static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    int i;
    for( i = 0; i < width; i++, sums++ )
    {
        int ads = cost_mvx[i]
                + abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[8] )
                + abs( enc_dc[2] - sums[delta] )
                + abs( enc_dc[3] - sums[delta+8] );
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
/* Successive elimination with 2 DC terms; see x264_pixel_ads4. */
static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    int i;
    for( i = 0; i < width; i++, sums++ )
    {
        int ads = cost_mvx[i]
                + abs( enc_dc[0] - sums[0] )
                + abs( enc_dc[1] - sums[delta] );
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
/* Successive elimination with a single DC term; see x264_pixel_ads4. */
static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
                            uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
{
    int nmv = 0;
    int i;
    for( i = 0; i < width; i++, sums++ )
    {
        int ads = cost_mvx[i] + abs( enc_dc[0] - sums[0] );
        if( ads < thresh )
            mvs[nmv++] = i;
    }
    return nmv;
}
/****************************************************************************
* x264_pixel_init:
****************************************************************************/
/* Fill the pixel comparison function table: start from the plain-C
implementations, then override entries with the best asm version the
detected cpu flags allow (later, more specific checks win). */
void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
{
memset( pixf, 0, sizeof(*pixf) );
/* INITn_NAME fills the n largest partition sizes of table name1 with the
x264_pixel_<name2>_<size><cpu> functions */
#define INIT2_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
pixf->name1[PIXEL_16x8] = x264_pixel_##name2##_16x8##cpu;
#define INIT4_NAME( name1, name2, cpu ) \
INIT2_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_8x16] = x264_pixel_##name2##_8x16##cpu;\
pixf->name1[PIXEL_8x8] = x264_pixel_##name2##_8x8##cpu;
#define INIT5_NAME( name1, name2, cpu ) \
INIT4_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_8x4] = x264_pixel_##name2##_8x4##cpu;
#define INIT6_NAME( name1, name2, cpu ) \
INIT5_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_4x8] = x264_pixel_##name2##_4x8##cpu;
#define INIT7_NAME( name1, name2, cpu ) \
INIT6_NAME( name1, name2, cpu ) \
pixf->name1[PIXEL_4x4] = x264_pixel_##name2##_4x4##cpu;
#define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
#define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
#define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
#define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
#define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
#define INIT_ADS( cpu ) \
pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
/* baseline: plain-C implementations for everything */
INIT7( sad, );
INIT7_NAME( sad_aligned, sad, );
INIT7( sad_x3, );
INIT7( sad_x4, );
INIT7( ssd, );
INIT7( satd, );
INIT7( satd_x3, );
INIT7( satd_x4, );
INIT4( sa8d, );
INIT4( hadamard_ac, );
INIT_ADS( );
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8;
pixf->ssim_4x4x2_core = ssim_4x4x2_core;
pixf->ssim_end4 = ssim_end4;
/* x86/x86-64 asm overrides, gated by runtime cpu flags */
#ifdef HAVE_MMX
if( cpu&X264_CPU_MMX )
{
INIT7( ssd, _mmx );
}
if( cpu&X264_CPU_MMXEXT )
{
INIT7( sad, _mmxext );
INIT7_NAME( sad_aligned, sad, _mmxext );
INIT7( sad_x3, _mmxext );
INIT7( sad_x4, _mmxext );
INIT7( satd, _mmxext );
INIT7( satd_x3, _mmxext );
INIT7( satd_x4, _mmxext );
INIT4( hadamard_ac, _mmxext );
INIT_ADS( _mmxext );
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_mmxext;
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_mmxext;
#ifdef ARCH_X86
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmxext;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmxext;
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmxext;
pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmxext;
if( cpu&X264_CPU_CACHELINE_32 )
{
INIT5( sad, _cache32_mmxext );
INIT4( sad_x3, _cache32_mmxext );
INIT4( sad_x4, _cache32_mmxext );
}
else if( cpu&X264_CPU_CACHELINE_64 )
{
INIT5( sad, _cache64_mmxext );
INIT4( sad_x3, _cache64_mmxext );
INIT4( sad_x4, _cache64_mmxext );
}
#else
if( cpu&X264_CPU_CACHELINE_64 )
{
pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmxext;
pixf->sad[PIXEL_8x8] = x264_pixel_sad_8x8_cache64_mmxext;
pixf->sad[PIXEL_8x4] = x264_pixel_sad_8x4_cache64_mmxext;
pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmxext;
pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_cache64_mmxext;
pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmxext;
pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_cache64_mmxext;
}
#endif
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmxext;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmxext;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmxext;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmxext;
}
if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
{
INIT2( sad, _sse2 );
INIT2( sad_x3, _sse2 );
INIT2( sad_x4, _sse2 );
if( !(cpu&X264_CPU_STACK_MOD4) )
{
INIT4( hadamard_ac, _sse2 );
}
INIT_ADS( _sse2 );
pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
#ifdef ARCH_X86
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( sad, _cache64_sse2 );
INIT2( sad_x3, _cache64_sse2 );
INIT2( sad_x4, _cache64_sse2 );
}
#endif
if( cpu&X264_CPU_SSE_MISALIGN )
{
INIT2( sad_x3, _sse2_misalign );
INIT2( sad_x4, _sse2_misalign );
}
}
if( cpu&X264_CPU_SSE2 )
{
INIT5( ssd, _sse2 );
if( cpu&X264_CPU_SSE2_IS_FAST )
{
INIT6( satd, _sse2 );
INIT6( satd_x3, _sse2 );
INIT6( satd_x4, _sse2 );
}
else
{
INIT5( satd, _sse2 );
INIT5( satd_x3, _sse2 );
INIT5( satd_x4, _sse2 );
}
INIT2_NAME( sad_aligned, sad, _sse2_aligned );
pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
#ifdef ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
#endif
}
if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
{
pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
}
if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
{
INIT2( sad, _sse3 );
INIT2( sad_x3, _sse3 );
INIT2( sad_x4, _sse3 );
}
if( cpu&X264_CPU_SSSE3 )
{
INIT7( satd, _ssse3 );
INIT7( satd_x3, _ssse3 );
INIT7( satd_x4, _ssse3 );
if( !(cpu&X264_CPU_STACK_MOD4) )
{
INIT4( hadamard_ac, _ssse3 );
}
INIT_ADS( _ssse3 );
pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_ssse3;
pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_ssse3;
#ifdef ARCH_X86_64
pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_ssse3;
#endif
if( cpu&X264_CPU_CACHELINE_64 )
{
INIT2( sad, _cache64_ssse3 );
INIT2( sad_x3, _cache64_ssse3 );
INIT2( sad_x4, _cache64_ssse3 );
}
if( cpu&X264_CPU_PHADD_IS_FAST )
{
INIT6( satd, _ssse3_phadd );
INIT6( satd_x3, _ssse3_phadd );
INIT6( satd_x4, _ssse3_phadd );
}
}
if( cpu&X264_CPU_SSE4 )
{
pixf->ssd[PIXEL_4x8] = x264_pixel_ssd_4x8_sse4;
pixf->ssd[PIXEL_4x4] = x264_pixel_ssd_4x4_sse4;
}
#endif //HAVE_MMX
#ifdef ARCH_PPC
if( cpu&X264_CPU_ALTIVEC )
{
x264_pixel_altivec_init( pixf );
}
#endif
#ifdef ARCH_UltraSparc
INIT4( sad, _vis );
INIT4( sad_x3, _vis );
INIT4( sad_x4, _vis );
#endif
/* the remaining ads entries alias the coarser predictors set by INIT_ADS */
pixf->ads[PIXEL_8x16] =
pixf->ads[PIXEL_8x4] =
pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
}
|
basis_tri_p1.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file basis_tri_p1.h
* @brief Contains a class representing p1 (piecewise linear) basis functions on
* a triangular surface mesh.
* @note updated documentation
*/
#ifndef INCLUDE_BESTHEA_BASIS_TRI_P1_H_
#define INCLUDE_BESTHEA_BASIS_TRI_P1_H_
#include "besthea/basis_function.h"
#include "besthea/triangular_surface_mesh.h"
namespace besthea {
namespace bem {
class basis_tri_p1;
}
}
/**
* Class representing a piecewise linear function on a triangular mesh.
*/
class besthea::bem::basis_tri_p1
  : public besthea::bem::basis_function< besthea::bem::basis_tri_p1 > {
 public:
  /**
   * Constructor.
   * @param[in] mesh Triangular surface mesh on which the basis functions are
   * defined.
   */
  basis_tri_p1( const mesh_type & mesh );

  /**
   * Destructor.
   */
  virtual ~basis_tri_p1( );

  /**
   * Returns the number of basis functions supported on a single element.
   *
   * This is always 3.
   */
  virtual lo dimension_local( ) const;

  /**
   * Returns the number of basis functions on the whole mesh.
   *
   * This is the number of all nodes in the underlying triangular surface mesh.
   */
  virtual lo dimension_global( ) const;

  /**
   * Returns the global indices of the nodes of the given element.
   * @param[in] i_elem Element index.
   * @param[out] indices Global node indices of the element.
   */
  void do_local_to_global( lo i_elem, std::vector< lo > & indices ) const;

  /**
   * Returns the global indices of the nodes of the given element. Their order
   * is modified according to the given parameters (regularized quadrature).
   * @param[in] i_elem Element index.
   * @param[in] n_shared_vertices Number of shared vertices in current elements
   * (regularized quadrature).
   * @param[in] rotation Virtual element rotation (regularized quadrature).
   * @param[in] swap Virtual element inversion (regularized quadrature).
   * @param[out] indices Global node indices of the element in modified order.
   */
  void do_local_to_global( lo i_elem, int n_shared_vertices, int rotation,
    bool swap, std::vector< lo > & indices ) const;

  /**
   * Evaluates a basis function in a point in an element. The point is given by
   * coordinates in the reference triangle
   * (\f$ (x_1,x_2) \in (0,1)\times(0,1-x_1) \f$).
   * @param[in] i_elem Element index.
   * @param[in] i_fun Local basis function index.
   * @param[in] x1_ref First coordinate of reference quadrature point.
   * @param[in] x2_ref Second coordinate of reference quadrature point.
   * @param[in] n Outward normal vector on the element.
   * \note By the nature of the basis functions, the result does not depend on
   * the choice of the element, and in particular not on the outward normal
   * vector.
   */
#pragma omp declare simd uniform( this, i_elem, i_fun, n ) simdlen( DATA_WIDTH )
  sc do_evaluate( [[maybe_unused]] lo i_elem, lo i_fun, sc x1_ref, sc x2_ref,
    [[maybe_unused]] const sc * n ) const {
    // The three local shape functions are the barycentric coordinates of the
    // reference triangle.
    sc value = 0.0;
    if ( i_fun == 0 ) {
      value = 1 - x1_ref - x2_ref;
    } else if ( i_fun == 1 ) {
      value = x1_ref;
    } else if ( i_fun == 2 ) {
      value = x2_ref;
    }
    return value;
  }

  /**
   * Evaluates a basis function in a point in an element. The point is given by
   * coordinates in the reference triangle
   * (\f$ (x_1,x_2) \in (0,1)\times(0,1-x_1) \f$).
   * @param[in] i_elem Element index.
   * @param[in] i_fun Local basis function index.
   * @param[in] x1_ref First coordinate of reference quadrature point.
   * @param[in] x2_ref Second coordinate of reference quadrature point.
   * @param[in] n Outward normal vector on the element
   * @param[in] n_shared_vertices Number of shared vertices in current elements
   * (regularized quadrature).
   * @param[in] rotation Virtual element rotation (regularized quadrature).
   * @param[in] swap Virtual element inversion (regularized quadrature).
   * \note By the nature of the basis functions, the result does not depend on
   * the choice of the element, and in particular not on the outward normal
   * vector.
   * \note The regularized quadrature parameters do not influence the result
   * either.
   */
#pragma omp declare simd uniform( this, i_elem, i_fun, n, n_shared_vertices, \
  rotation, swap ) simdlen( DATA_WIDTH )
  sc do_evaluate( lo i_elem, lo i_fun, sc x1_ref, sc x2_ref, const sc * n,
    [[maybe_unused]] int n_shared_vertices, [[maybe_unused]] int rotation,
    [[maybe_unused]] bool swap ) const {
    // The regularized quadrature parameters do not affect the value (see the
    // note above), so delegate to the plain evaluation routine instead of
    // duplicating the barycentric coordinate formulas.
    return do_evaluate( i_elem, i_fun, x1_ref, x2_ref, n );
  }

  /**
   * Evaluates the surface curl of all basis functions in a given element.
   * @param[in] i_elem Element index.
   * @param[in] n Outward normal vector on the element
   * @param[in] n_shared_vertices Number of shared vertices in current elements
   * (regularized quadrature).
   * @param[in] rotation Virtual element rotation (regularized quadrature).
   * @param[in] swap Virtual element inversion (regularized quadrature).
   * @param[out] curls Surface curls of all three shape functions.
   */
  void evaluate_curl( lo i_elem, const linear_algebra::coordinates< 3 > & n,
    int n_shared_vertices, int rotation, bool swap, sc * curls ) const;
};
#endif /* INCLUDE_BESTHEA_BASIS_TRI_P1_H_ */
|
gimple.h | /* Gimple IR definitions.
Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "ggc.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "tree-ssa-operands.h"
DEF_VEC_P(gimple);
DEF_VEC_ALLOC_P(gimple,heap);
DEF_VEC_ALLOC_P(gimple,gc);
typedef gimple *gimple_p;
DEF_VEC_P(gimple_p);
DEF_VEC_ALLOC_P(gimple_p,heap);
DEF_VEC_P(gimple_seq);
DEF_VEC_ALLOC_P(gimple_seq,gc);
DEF_VEC_ALLOC_P(gimple_seq,heap);
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef VEC(gimple, heap) *gimple_vec;
DEF_VEC_P (gimple_vec);
DEF_VEC_ALLOC_P (gimple_vec, heap);
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
  GIMPLE_INVALID_RHS,	/* The expression cannot be used on the RHS.  */
  GIMPLE_BINARY_RHS,	/* The expression is a binary operation.  */
  GIMPLE_UNARY_RHS,	/* The expression is a unary operation.  */
  GIMPLE_SINGLE_RHS	/* The expression is a single object (an SSA
			   name, a _DECL, a _REF, etc.).  */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use sub-codes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
    /* Flags stored on GIMPLE_ASM statements.  */
    GF_ASM_INPUT = 1 << 0,
    GF_ASM_VOLATILE = 1 << 1,
    /* Flags stored on GIMPLE_CALL statements.  */
    GF_CALL_CANNOT_INLINE = 1 << 0,
    GF_CALL_FROM_THUNK = 1 << 1,
    GF_CALL_RETURN_SLOT_OPT = 1 << 2,
    GF_CALL_TAILCALL = 1 << 3,
    GF_CALL_VA_ARG_PACK = 1 << 4,
    GF_CALL_NOTHROW = 1 << 5,
    /* Flag stored on GIMPLE_OMP_PARALLEL statements.  */
    GF_OMP_PARALLEL_COMBINED = 1 << 0,
    /* True on a GIMPLE_OMP_RETURN statement if the return does not require
       a thread synchronization via some sort of barrier.  The exact barrier
       that would otherwise be emitted is dependent on the OMP statement with
       which this return is associated.  */
    GF_OMP_RETURN_NOWAIT = 1 << 0,
    GF_OMP_SECTION_LAST = 1 << 0,
    /* Flag stored on GIMPLE_PREDICT statements.  */
    GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there's only one type of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* A node in a gimple_seq_d. */
struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d {
  /* The statement held by this node.  */
  gimple stmt;
  /* Previous and next nodes in the doubly-linked sequence.  */
  struct gimple_seq_node_d *prev;
  struct gimple_seq_node_d *next;
};
/* A double-linked sequence of gimple statements. */
struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d {
  /* First and last statements in the sequence.  Both are NULL for an
     empty sequence.  */
  gimple_seq_node first;
  gimple_seq_node last;
  /* Sequences are created/destroyed frequently.  To minimize
     allocation activity, deallocated sequences are kept in a pool of
     available sequences.  This is the pointer to the next free
     sequence in the pool.  */
  gimple_seq next_free;
};
/* Return the first node in GIMPLE sequence S, or NULL when S itself
   is NULL.  */

static inline gimple_seq_node
gimple_seq_first (const_gimple_seq s)
{
  if (s == NULL)
    return NULL;
  return s->first;
}
/* Return the first statement in GIMPLE sequence S, or NULL when the
   sequence is empty or missing.  */

static inline gimple
gimple_seq_first_stmt (const_gimple_seq s)
{
  gimple_seq_node first_node = gimple_seq_first (s);
  if (first_node == NULL)
    return NULL;
  return first_node->stmt;
}
/* Return the last node in GIMPLE sequence S, or NULL when S itself
   is NULL.  */

static inline gimple_seq_node
gimple_seq_last (const_gimple_seq s)
{
  if (s == NULL)
    return NULL;
  return s->last;
}
/* Return the last statement in GIMPLE sequence S, or NULL when the
   sequence is empty or missing.  */

static inline gimple
gimple_seq_last_stmt (const_gimple_seq s)
{
  gimple_seq_node last_node = gimple_seq_last (s);
  if (last_node == NULL)
    return NULL;
  return last_node->stmt;
}
/* Set the last node in GIMPLE sequence S to LAST.  Only the cached tail
   pointer is updated; the node links themselves are untouched.  */
static inline void
gimple_seq_set_last (gimple_seq s, gimple_seq_node last)
{
  s->last = last;
}
/* Set the first node in GIMPLE sequence S to FIRST.  Only the cached head
   pointer is updated; the node links themselves are untouched.  */
static inline void
gimple_seq_set_first (gimple_seq s, gimple_seq_node first)
{
  s->first = first;
}
/* Return true if GIMPLE sequence S holds no statements.  A NULL sequence
   counts as empty.  */

static inline bool
gimple_seq_empty_p (const_gimple_seq s)
{
  if (s == NULL)
    return true;
  return s->first == NULL;
}
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimplify_seq_add_stmt (gimple_seq *, gimple);
/* Allocate a new sequence and initialize its first element with STMT.  */

static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
  gimple_seq new_seq = NULL;

  /* Passing a NULL sequence makes gimple_seq_add_stmt allocate one.  */
  gimple_seq_add_stmt (&new_seq, stmt);
  return new_seq;
}
/* Returns the sequence of statements in BB, or NULL when BB is an RTL
   block or carries no GIMPLE information.  */

static inline gimple_seq
bb_seq (const_basic_block bb)
{
  if (bb->flags & BB_RTL)
    return NULL;
  if (bb->il.gimple == NULL)
    return NULL;
  return bb->il.gimple->seq;
}
/* Sets the sequence of statements in BB to SEQ.  BB must not be an RTL
   block; this is enforced by the assertion below.  */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
  gcc_assert (!(bb->flags & BB_RTL));
  bb->il.gimple->seq = seq;
}
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq seq;
basic_block bb;
} gimple_stmt_iterator;
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY(()) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
struct basic_block_def *bb;
/* [ WORD 4 ]
Lexical block holding this statement. */
tree block;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct def_optype_d GTY((skip (""))) *def_ops;
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7-8 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 6 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree types;
/* [ WORD 6 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Filter types. */
tree types;
/* [ WORD 6 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
unsigned capacity;
unsigned nargs;
/* [ WORD 6 ] */
tree result;
/* [ WORD 7 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 6 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 5 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
__asm__ statement. */
const char *string;
/* [ WORD 10 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 11 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 8 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 9 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Clauses. */
tree clauses;
/* [ WORD 7 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 8 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-8 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 9 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 10-11 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree control_def;
/* [ WORD 6 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree val;
};
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
union GTY ((desc ("gimple_statement_structure (&%h)"))) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
   of communicating the profile info to the builtin expanders.  */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3 MEM_STAT_INFO)
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (unsigned, tree, tree, ...);
gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
int gimple_call_flags (const_gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_single_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, struct basic_block_def *);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *, enum tree_code,
tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
bool is_gimple_operand (const_tree);
void gimple_set_modified (gimple, bool);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_rhs_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_fold_obj_type_ref (tree, tree);
/* Returns true iff T is a valid GIMPLE statement. */
extern bool is_gimple_stmt (tree);
/* Returns true iff TYPE is a valid type for a scalar register variable. */
extern bool is_gimple_reg_type (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprocedural invariant.  */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
GIMPLE temporary, a renamed user variable, or something else,
respectively. */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a type conversion. */
extern bool is_gimple_cast (tree);
/* Returns true iff T is a variable that does not need to live in memory. */
extern bool is_gimple_non_addressable (tree t);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
/* If T makes a function call, returns the CALL_EXPR operand. */
extern tree get_call_expr_in (tree t);
extern void recalculate_side_effects (tree);
extern bool compare_field_offset (tree, tree);
extern tree gimple_register_type (tree);
extern void print_gimple_types_stats (void);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions.  Note that these predicates only check
   the basic form of the expression, they don't recurse to make sure that
   underlying nodes are also of the right form.  */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate.  */
enum fallback {
  fb_none = 0,		/* Do not generate a temporary.  */
  fb_rvalue = 1,	/* Generate an rvalue to hold the result of a
			   gimplified expression.  */
  fb_lvalue = 2,	/* Generate an lvalue to hold the result of a
			   gimplified expression.  */
  fb_mayfail = 4,	/* Gimplification may fail.  Error issued
			   afterwards.  */
  fb_either= fb_rvalue | fb_lvalue
};
/* Bit-mask of 'enum fallback' values.  */
typedef int fallback_t;
/* Result of a single gimplification step.  */
enum gimplify_status {
  GS_ERROR	= -2,	/* Something Bad Seen.  */
  GS_UNHANDLED = -1,	/* A langhook result for "I dunno".  */
  GS_OK		= 0,	/* We did something, maybe more to do.  */
  GS_ALL_DONE	= 1	/* The expression is fully gimplified.  */
};
/* Per-function gimplification state.  Contexts form a stack, linked
   through prev_context, and are managed by push_gimplify_context /
   pop_gimplify_context (declared below).  */
struct gimplify_ctx
{
  struct gimplify_ctx *prev_context;	/* Enclosing context, if any.  */
  VEC(gimple,heap) *bind_expr_stack;	/* Stack of enclosing GIMPLE_BINDs.  */
  tree temps;
  gimple_seq conditional_cleanups;
  tree exit_label;
  tree return_temp;
  VEC(tree,heap) *case_labels;
  /* The formal temporary table.  Should this be persistent?  */
  htab_t temp_htab;
  int conditions;
  bool save_stack;
  bool into_ssa;
  bool allow_rhs_cond_expr;
};
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
					   bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree *, tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers.  */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree alloc_stmt_list (void);
extern void free_stmt_list (tree);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
						  gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c.  */
extern tree omp_reduction_init (tree, tree);
/* In tree-nested.c.  */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c.  */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c.  */
extern tree gimple_assign_rhs_to_tree (gimple);
/* In builtins.c  */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c  */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* Return the code for GIMPLE statement G.  */
static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->gsbase.code;
}
/* Return the GSS code used by a GIMPLE code.  */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
#ifdef ENABLE_CHECKING
  gcc_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
#endif
  /* Simple table lookup; the table is indexed by gimple_code.  */
  return gss_for_code_[code];
}
/* Return which GSS code is used by GS.  */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements.  */
static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
      return true;
    default:
      return false;
    }
}
/* Return the basic block holding statement G.  */
static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
  return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G.  */
static inline tree
gimple_block (const_gimple g)
{
  return g->gsbase.block;
}
/* Set BLOCK to be the lexical scope block holding statement G.  */
static inline void
gimple_set_block (gimple g, tree block)
{
  g->gsbase.block = block;
}
/* Return location information for statement G.  */
static inline location_t
gimple_location (const_gimple g)
{
  return g->gsbase.location;
}
/* Return pointer to location information for statement G.  */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->gsbase.location;
}
/* Set location information for statement G.  */
static inline void
gimple_set_location (gimple g, location_t location)
{
  g->gsbase.location = location;
}
/* Return true if G contains location information.  */
static inline bool
gimple_has_location (const_gimple g)
{
  return gimple_location (g) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT.  */
static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT.  */
static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton, i.e. contains exactly one
   (non-NULL) statement.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  return ((gimple_seq_first (seq) != NULL)
	  && (gimple_seq_first (seq) == gimple_seq_last (seq)));
}
/* Return true if no warnings should be emitted for statement STMT.  */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING.  */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P.  */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT.  */
static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P.  */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->gsbase.plf |= (unsigned int) plf;
  else
    stmt->gsbase.plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT.
   Note this returns the raw masked bits, not a normalized bool.  */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->gsbase.plf & ((unsigned int) plf);
}
/* Set the UID of statement.  */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->gsbase.uid = uid;
}
/* Return the UID of statement.  */
static inline unsigned
gimple_uid (const_gimple g)
{
  return g->gsbase.uid;
}
/* Return true if GIMPLE statement G has register or memory operands.
   NOTE: this relies on the ordering of codes in enum gimple_code:
   all codes in [GIMPLE_COND, GIMPLE_RETURN] have operand vectors.  */
static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands.
   As above, relies on [GIMPLE_ASSIGN, GIMPLE_RETURN] being the
   contiguous range of codes with memory operands.  */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of DEF operands for statement G, or NULL if G has no
   operand vector.  */
static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.def_ops;
}
/* Set DEF to be the set of DEF operands for statement G.  */
static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
  gcc_assert (gimple_has_ops (g));
  g->gsops.opbase.def_ops = def;
}
/* Return the set of USE operands for statement G, or NULL if G has no
   operand vector.  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}
/* Set USE to be the set of USE operands for statement G.  */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}
/* Return the set of VUSE operand for statement G.  The virtual use
   operand, if present, is always the first entry in the use list; it is
   identified by pointer equality with the vuse slot in the statement.  */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops
      && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}
/* Return the set of VDEF operand for statement G.  Identified by pointer
   equality with the vdef slot, as for gimple_vuse_op above.  */
static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
  struct def_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  ops = g->gsops.opbase.def_ops;
  if (ops
      && DEF_OP_PTR (ops) == &g->gsmembase.vdef)
    return DEF_OP_PTR (ops);
  return NULL_DEF_OPERAND_P;
}
/* Return the single VUSE operand of the statement G.  */
static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G.  */
static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}
/* Return a pointer to the single VUSE operand of the statement G.  */
static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}
/* Return a pointer to the single VDEF operand of the statement G.  */
static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}
/* Set the single VUSE operand of the statement G.  */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}
/* Set the single VDEF operand of the statement G.  */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
   been set.  Statements without an operand vector are never considered
   modified.  */
static inline bool
gimple_modified_p (const_gimple g)
{
  if (!gimple_has_ops (g))
    return false;
  return (bool) g->gsbase.modified;
}
/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    /* For assignments and conditions the subcode IS the tree code.  */
    return (enum tree_code) stmt->gsbase.subcode;
  else if (code == GIMPLE_CALL)
    return CALL_EXPR;
  else
    gcc_unreachable ();
}
/* Mark statement S as modified, and update it.  */
static inline void
update_stmt (gimple s)
{
  if (gimple_has_ops (s))
    {
      gimple_set_modified (s, true);
      update_stmt_operands (s);
    }
}
/* Update statement S if it has been optimized.  */
static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->gsbase.has_volatile_ops;
  else
    return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  Silently ignored for
   statements without memory operands.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if statement STMT may access memory, i.e. it carries a
   virtual use.  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S.  Relies on all OMP codes
   forming the contiguous range [GIMPLE_OMP_ATOMIC_LOAD, GIMPLE_OMP_SINGLE]
   in enum gimple_code.  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}
/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->gsbase.num_ops;
}
/* Set the number of operands for statement GS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->gsbase.num_ops = num_ops;
}
/* Return the array of operands for statement GS.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;
  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_assert (off != 0);
  return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS, or NULL_TREE if GS has no
   operand vector.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
#ifdef ENABLE_CHECKING
      gcc_assert (i < gimple_num_ops (gs));
#endif
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}
/* Return a pointer to operand I for statement GS, or NULL if GS has no
   operand vector.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
#ifdef ENABLE_CHECKING
      gcc_assert (i < gimple_num_ops (gs));
#endif
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}
/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments.  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Assignment operands are laid out as: op[0] = LHS, op[1] = RHS1,
   op[2] = RHS2 (binary RHS only).  */
/* Return the LHS of assignment statement GS.  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS.  Also keeps
   the SSA def-stmt link up to date when LHS is an SSA name.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS.  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}
/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  code = gimple_expr_code (gs);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));
  return code;
}
/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if S is a type-cast assignment.  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
	     || sc == VIEW_CONVERT_EXPR
	     || sc == FIX_TRUNC_EXPR;
    }
  return false;
}
/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}
/* Call operands are laid out as: op[0] = LHS, op[1] = function,
   op[2] = static chain, op[3..] = arguments.  */
/* Return the LHS of call statement GS.  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS.  Also keeps the
   SSA def-stmt link up to date when LHS is an SSA name.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the tree node representing the function called by call
   statement GS.  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS; the decl is
   wrapped in an ADDR_EXPR carrying the call's source location.  */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
  tree addr = gimple_call_fn (gs);
  /* Direct calls store &FUNCTION_DECL; indirect calls store some other
     expression, for which we return NULL_TREE.  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    return TREE_OPERAND (addr, 0);
  return NULL_TREE;
}
/* Return the type returned by call statement GS.  */
static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree fn = gimple_call_fn (gs);
  tree type = TREE_TYPE (fn);
  /* See through the pointer.  */
  type = TREE_TYPE (type);
  /* The type returned by a FUNCTION_DECL is the type of its
     function type.  */
  return TREE_TYPE (type);
}
/* Return the static chain for call statement GS.  */
static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS.  */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS.  */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS.  The first
   three operands (LHS, function, static chain) are not arguments.  */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  return num_ops - 3;
}
/* Return the argument at position INDEX for call statement GS.  */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
   statement GS.  */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS.  */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call.  */
static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}
/* If CANNOT_INLINE_P is true, mark GIMPLE_CALL S as not inlinable;
   otherwise clear that mark.  (The original comment and parameter name
   "inlinable_p" had the polarity inverted: a true argument *sets*
   GF_CALL_CANNOT_INLINE, i.e. prevents inlining.)  */
static inline void
gimple_call_set_cannot_inline (gimple s, bool cannot_inline_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (cannot_inline_p)
    s->gsbase.subcode |= GF_CALL_CANNOT_INLINE;
  else
    s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE;
}
/* Return true if GIMPLE_CALL S cannot be inlined.  */
static inline bool
gimple_call_cannot_inline_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization.  */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk.  */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call.  */
static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call.  */
static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */
static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
	  || (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt) != NULL_TREE));
}
/* Conditional operands are laid out as: op[0] = LHS, op[1] = RHS,
   op[2] = true label, op[3] = false label; the comparison code lives
   in the subcode field.  */
/* Return the code of the predicate computed by conditional statement GS.  */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS.  */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS.  */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS.  */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}
/* Set the conditional COND_STMT to be of the form 'if (1 == 0)',
   i.e. an always-false condition.  */
static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Set the conditional COND_STMT to be of the form 'if (1 == 1)',
   i.e. an always-true condition.  */
static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)', i.e. a trivially
   true predicate over boolean constants.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree op0 = gimple_cond_lhs (gs);
  tree op1 = gimple_cond_rhs (gs);
  enum tree_code pred = gimple_cond_code (gs);
  /* Both operands must be boolean constants.  */
  if ((op0 != boolean_true_node && op0 != boolean_false_node)
      || (op1 != boolean_true_node && op1 != boolean_false_node))
    return false;
  /* EQ of equal constants, or NE of distinct ones, is always true.  */
  return ((pred == EQ_EXPR && op0 == op1)
	  || (pred == NE_EXPR && op0 != op1));
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)', i.e. a trivially
   false predicate over boolean constants.  */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree op0 = gimple_cond_lhs (gs);
  tree op1 = gimple_cond_rhs (gs);
  enum tree_code pred = gimple_cond_code (gs);
  /* Both operands must be boolean constants.  */
  if ((op0 != boolean_true_node && op0 != boolean_false_node)
      || (op1 != boolean_true_node && op1 != boolean_false_node))
    return false;
  /* NE of equal constants, or EQ of distinct ones, is always false.  */
  return ((pred == NE_EXPR && op0 == op1)
	  || (pred == EQ_EXPR && op0 != op1));
}
/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)', i.e. a test of a single boolean variable.  */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
  enum tree_code pred = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);
  return ((pred == NE_EXPR && rhs == boolean_false_node)
	  || (pred == EQ_EXPR && rhs == boolean_true_node));
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */
static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS.  */
static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS.  */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS.  */
static inline tree
gimple_bind_vars (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
statement GS.  Replaces any existing list.  */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
statement GS.  VARS is chained onto the end of the existing list.  */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.body;
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
statement GS.  */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */
static inline tree
gimple_bind_block (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
statement GS.  BLOCK may be NULL_TREE, otherwise it must be a BLOCK
node (enforced by the assertion).  */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gcc_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK);
gs->gimple_bind.block = block;
}
/* Return the number of input operands for GIMPLE_ASM GS.
   The four counters below (ni, no, nc, nl) record how many input,
   output, clobber and label operands are stored in the tuple.  */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.ni;
}
/* Return the number of output operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nc;
}
/* Return the number of label operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs occupy the
   first NI operand slots of the tuple.  */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid input slots are 0 .. ni-1; INDEX == ni would read the first
     output operand.  The previous '<=' check was an off-by-one.  */
  gcc_assert (index < gs->gimple_asm.ni);
  return gimple_op (gs, index);
}

/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.ni);
  return gimple_op_ptr (gs, index);
}

/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  IN_OP must
   be a TREE_LIST node, as enforced by the assertion below.  */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.ni);
  gcc_assert (TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs are stored
   immediately after the NI input operands.  */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid output slots are 0 .. no-1; the previous '<=' check was an
     off-by-one that permitted reading one slot past the outputs.  */
  gcc_assert (index < gs->gimple_asm.no);
  return gimple_op (gs, index + gs->gimple_asm.ni);
}

/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.no);
  return gimple_op_ptr (gs, index + gs->gimple_asm.ni);
}

/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  OUT_OP must
   be a TREE_LIST node, as enforced by the assertion below.  */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.no);
  gcc_assert (TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers are stored
   after the NI input and NO output operands.  */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid clobber slots are 0 .. nc-1; the previous '<=' check was an
     off-by-one.  */
  gcc_assert (index < gs->gimple_asm.nc);
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}

/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.
   CLOBBER_OP must be a TREE_LIST node, as enforced by the assertion.  */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.nc);
  gcc_assert (TREE_CODE (clobber_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS.  Labels are stored
   after the NI input, NO output and NC clobber operands.  */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  /* Valid label slots are 0 .. nl-1; the previous '<=' check was an
     off-by-one.  */
  gcc_assert (index < gs->gimple_asm.nl);
  /* The offset must skip inputs, outputs AND clobbers; the previous
     'ni + nc' offset omitted the output count, colliding with the
     output/clobber slots for any asm that has outputs.  The getter and
     setter below use the same corrected layout, so they remain
     consistent with each other.  */
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no
                        + gs->gimple_asm.nc);
}

/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  LABEL_OP
   must be a TREE_LIST node, as enforced by the assertion.  */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_assert (index < gs->gimple_asm.nl);
  gcc_assert (TREE_CODE (label_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no
                     + gs->gimple_asm.nc, label_op);
}
/* Return the string representing the assembly instruction in
GIMPLE_ASM GS.  */
static inline const char *
gimple_asm_string (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.string;
}
/* Return true if GS is an asm statement marked volatile.  The flag is
kept as a bit in the tuple's subcode.  */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile, otherwise
clear the volatile flag.  */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
gs->gsbase.subcode |= GF_ASM_VOLATILE;
else
gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT, otherwise clear
the flag.  */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
gs->gsbase.subcode |= GF_ASM_INPUT;
else
gs->gsbase.subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT.  */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS.  */
static inline tree
gimple_catch_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.
Non-const so callers can update the field in place.  */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.types;
}
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement GS.  */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.handler;
}
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS.  */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.handler;
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS.  */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return &gs->gimple_eh_filter.types;
}
/* Return the sequence of statements to execute when GIMPLE_EH_FILTER
statement fails.  */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.failure;
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region of
GIMPLE_EH_MUST_NOT_THROW statement GS.  */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
return gs->gimple_eh_mnt.fndecl;
}
/* Set the function decl to be called by GS to DECL.  */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_TRY accessors.  */
/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY, stored in the low bits
of the tuple subcode.  */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS.  Note that
changing the kind overwrites the entire subcode, which also clears the
GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gcc_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
gs->gsbase.subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid for
GIMPLE_TRY_CATCH statements (asserted).  */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.eval;
}
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.cleanup;
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  Only valid for
GIMPLE_TRY_CATCH statements (asserted).  */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS.  */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS.  */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.cleanup = cleanup;
}
/* Return the cleanup sequence for cleanup statement GS.  */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gimple_wce.cleanup;
}
/* Set CLEANUP to be the cleanup sequence for GS.  */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  The flag is
stored in the tuple subcode (any nonzero value means set).  */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS,
i.e. the number of slots allocated in the argument array (always
>= the in-use count returned by gimple_phi_num_args).  */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS.  This must always
be exactly the number of incoming edges for the basic block holding
GS.  */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS.  */
static inline tree
gimple_phi_result (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS.  */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.  */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gs->gimple_phi.result = result;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
   GIMPLE_PHI GS.  */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* Valid slots are 0 .. capacity-1; the previous '<=' check was an
     off-by-one that allowed reading one slot past the argument
     array.  */
  gcc_assert (index < gs->gimple_phi.capacity);
  return &(gs->gimple_phi.args[index]);
}

/* Set PHIARG to be the argument corresponding to incoming edge INDEX
   for GIMPLE_PHI GS.  The argument is copied by value into the
   tuple's argument array.  */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* Only in-use slots (0 .. nargs-1) may be overwritten; writing at
     INDEX == nargs (permitted by the old '<=' check) would store an
     argument that nargs does not account for.  */
  gcc_assert (index < gs->gimple_phi.nargs);
  memcpy (gs->gimple_phi.args + index, phiarg, sizeof (struct phi_arg_d));
}
/* Return the region number for GIMPLE_RESX GS.  RESX and EH_DISPATCH
share the gimple_eh_ctrl layout.  */
static inline int
gimple_resx_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS.  */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS.  */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS.  */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS.
Operand 0 is the index expression; all remaining operands are labels,
including the default label (label index 0).  */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
num_ops = gimple_num_ops (gs);
gcc_assert (num_ops > 1);
return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS.  */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS.  */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS.  */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS.  INDEX
must be a variable or a constant (asserted).  */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX.  The default label is 0, followed by any
labels in a switch statement.  */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL.  0 is always the default label.  */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
gcc_assert (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR);
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement.  */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
return gimple_switch_label (gs, 0);
}
/* Set the default label for a switch statement.  */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement.  */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement.  The debug
statement's subkind is kept in the tuple subcode.  */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement
(operand 0).  */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement (operand 1).  */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement.  */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement.  */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement, marking it as optimized away.  */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE)/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value.  */;
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value.  */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
/* Return the body for the OMP statement GS.  Note there is no
GIMPLE_CHECK here: this accessor relies on the common 'omp' layout
shared by all OMP tuples.  */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return gs->omp.body;
}
/* Set BODY to be the body for the OMP statement GS.  */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS.  */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS.  */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS.  */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS.  */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS.  */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS, i.e. the number of iteration
descriptors in the iter[] array indexed by I below.  */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.collapse;
}
/* Return the index variable for dimension I of OMP_FOR GS.  */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for dimension I of OMP_FOR GS.  */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for dimension I of OMP_FOR GS.  */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for dimension I of OMP_FOR GS.  */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for dimension I of OMP_FOR GS.  */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for dimension I of OMP_FOR GS.  */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for dimension I of OMP_FOR GS.  */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for dimension I of OMP_FOR GS.  */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for dimension I of OMP_FOR GS.  */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for dimension I of OMP_FOR GS.  */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for dimension I of OMP_FOR GS.  */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for dimension I of OMP_FOR GS.  */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts.  */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.pre_body;
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts.  */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS.  */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS.  */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS.  */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.  */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS.  The OMP_TASK tuple
reuses the gimple_omp_parallel layout for these common fields.  */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS.  */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS.  */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS.  */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS.  */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS.  */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS.
The 'taskreg' accessors below accept either code; the check pattern
allows OMP_PARALLEL and otherwise asserts OMP_TASK.  */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL or
OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
or OMP_TASK GS.  */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL or
OMP_TASK GS.  */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL or OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK
GS.  */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL or OMP_TASK
GS.  */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK
GS.  */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK
GS.  */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function of OMP_TASK GS.  (Presumably the function
that copies the task's data block; confirm against the task lowering
code.)  */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function of OMP_TASK GS.  */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_size = arg_size;
}
/* Return align of the data block in bytes in OMP_TASK GS.  */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS.  */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS.  */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS.  */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
   GS.  */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.control;
}
/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS statement GS.  */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS statement GS.  */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for the I-th collapsed loop of
   OMP_FOR GS.  COND must be a comparison tree code.  */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (TREE_CODE_CLASS (cond) == tcc_comparison);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code of the I-th collapsed loop of OMP_FOR GS.  */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].cond;
}
/* Set the value being stored in an atomic store.  */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store.  */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store.  */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load.  */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load.  */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load.  */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_def;
}
/* The same as above, but return the address of the field.  */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_use;
}
/* The same as above, but return the address of the field.  */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_use = use;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS.
   The return value is stored as operand 0 of the statement.  */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS.  */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Returns true when the gimple statement STMT is any of the OpenMP types.
   CASE_GIMPLE_OMP expands to the case labels of every OpenMP statement
   code, for use in switches over gimple_code.  */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Predicate: statement G is a GIMPLE_NOP.  */
static inline bool
gimple_nop_p (const_gimple g)
{
  return GIMPLE_NOP == gimple_code (g);
}
/* Predicate: statement GS is a GIMPLE_RESX.  */
static inline bool
is_gimple_resx (const_gimple gs)
{
  return GIMPLE_RESX == gimple_code (gs);
}
/* The GIMPLE_PREDICT accessors below pack two things into the statement
   subcode: the branch predictor id, and the GF_PREDICT_TAKEN flag bit
   giving the predicted outcome.  */
/* Return the predictor of GIMPLE_PREDICT statement GS.  */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICTOR,
   preserving the outcome bit.  */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS.  */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME.  */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->gsbase.subcode |= GF_PREDICT_TAKEN;
else
gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.  Only assignments,
   calls and conditions yield a non-void type here: calls use their
   return type, POINTER_PLUS_EXPR assignments use the pointer operand's
   type, other assignments fall back to the LHS type, and conditions
   are boolean.  */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
for both the RHS and the LHS types if there is a possibly
useless conversion involved. That means returning the
original RHS type as far as we can reconstruct it. */
if (code == GIMPLE_CALL)
type = gimple_call_return_type (stmt);
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
/* For pointer arithmetic the RHS type is the pointer type of
   operand 1, not the sizetype of the offset.  */
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS. */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement.
   The iterator's bb field is derived from the first statement, or NULL
   when the sequence is empty or the statement has no basic block.  */
static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the first statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement.
   As with gsi_start, bb is inferred from the statement when possible.  */
static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the last statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return true if I is at the end of its sequence.  */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence.  */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance the iterator to the next gimple statement.
   I must not be at the end position: i->ptr is dereferenced
   unconditionally.  */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->next;
}
/* Advance the iterator to the previous gimple statement.
   Same precondition as gsi_next: i->ptr must be non-NULL.  */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->prev;
}
/* Return the current stmt.  I must point at a statement.  */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr->stmt;
}
/* Return a block statement iterator for BB positioned on the first
   statement that is not a GIMPLE_LABEL, or on the end position when
   BB contains only labels.  */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
  gimple_stmt_iterator it;
  for (it = gsi_start_bb (bb); !gsi_end_p (it); gsi_next (&it))
    if (gimple_code (gsi_stmt (it)) != GIMPLE_LABEL)
      break;
  return it;
}
/* Advance the iterator to the next non-debug gimple statement.
   Always advances at least once, then skips debug statements.  */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_next (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Advance the iterator to the previous non-debug gimple statement.
   Always steps back at least once, then skips debug statements.  */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_prev (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Return a new iterator pointing to the first non-debug statement in
   basic block BB.  */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_next_nondebug (&i);
return i;
}
/* Return a new iterator pointing to the last non-debug statement in
   basic block BB.  */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_prev_nondebug (&i);
return i;
}
/* Return a pointer to the current stmt.
   NOTE: You may want to use gsi_replace on the iterator itself,
   as this performs additional bookkeeping that will not be done
   if you simply assign through a pointer returned by gsi_stmt_ptr.  */
static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
return &i->ptr->stmt;
}
/* Return the basic block associated with this iterator.  */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
}
/* Return the sequence associated with this iterator.  */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return i.seq;
}
/* Policies controlling where a gsi_insert_* call leaves the iterator.  */
enum gsi_iterator_update
{
GSI_NEW_STMT, /* Only valid when single statement is added, move
iterator to it. */
GSI_SAME_STMT, /* Leave the iterator at the same statement. */
GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
for linking other statements in the same
direction. */
};
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *);
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
/* Convenience routines to walk all statements of a gimple function.
Note that this is useful exclusively before the code is converted
into SSA form. Once the program is in SSA form, the standard
operand interface should be used to analyze/modify statements. */
/* State shared between a statement walk and its callbacks; passed to
   every walk_stmt_fn/walk_tree_fn invocation.  */
struct walk_stmt_info
{
/* Points to the current statement being walked. */
gimple_stmt_iterator gsi;
/* Additional data that the callback functions may want to carry
through the recursion. */
void *info;
/* Pointer map used to mark visited tree nodes when calling
walk_tree on each operand. If set to NULL, duplicate tree nodes
will be visited more than once. */
struct pointer_set_t *pset;
/* Indicates whether the operand being examined may be replaced
with something that matches is_gimple_val (if true) or something
slightly more complicated (if false). "Something" technically
means the common subset of is_gimple_lvalue and is_gimple_rhs,
but we never try to form anything more complicated than that, so
we don't bother checking.
Also note that CALLBACK should update this flag while walking the
sub-expressions of a statement. For instance, when walking the
statement 'foo (&var)', the flag VAL_ONLY will initially be set
to true, however, when walking &var, the operand of that
ADDR_EXPR does not need to be a GIMPLE value. */
bool val_only;
/* True if we are currently walking the LHS of an assignment. */
bool is_lhs;
/* Optional. Set to true by the callback functions if they made any
changes. */
bool changed;
/* True if we're interested in location information. */
bool want_locations;
/* Operand returned by the callbacks. This is set when calling
walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
returns non-NULL, this field will contain the tree returned by
the last callback. */
tree callback_result;
};
/* Callback for walk_gimple_stmt. Called for every statement found
during traversal. The first argument points to the statement to
walk. The second argument is a flag that the callback sets to
'true' if it the callback handled all the operands and
sub-statements of the statement (the default value of this flag is
'false'). The third argument is an anonymous pointer to data
to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
#ifdef GATHER_STATISTICS
/* Enum and arrays used for allocation stats. Keep in sync with
gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
gimple_alloc_kind_seq, /* Sequences. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
/* Per-kind counters, indexed by gimple_alloc_kind.  */
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE.  Codes without a
   dedicated bucket are counted under gimple_alloc_kind_rest.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
{
case GIMPLE_ASSIGN:
return gimple_alloc_kind_assign;
case GIMPLE_PHI:
return gimple_alloc_kind_phi;
case GIMPLE_COND:
return gimple_alloc_kind_cond;
default:
return gimple_alloc_kind_rest;
}
}
#endif /* GATHER_STATISTICS */
extern void dump_gimple_statistics (void);
#endif /* GCC_GIMPLE_H */
|
matrix_oper.h | #ifndef MATRIX_OPER_H_
#define MATRIX_OPER_H_
namespace acspo {
// Element-wise compound assignment, matrix op matrix.  Each operator
// checks that the two matrices have the same size (throwing
// std::runtime_error on mismatch) and then applies the operation to
// every element of mat1 in place, in parallel.
// In-place element-wise addition: mat1[i] += mat2[i].
template <typename T>
matrix<T> & operator+=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] += m2ptr[i];
}
return mat1;
}
// In-place element-wise subtraction: mat1[i] -= mat2[i].
template <typename T>
matrix<T> & operator-=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] -= m2ptr[i];
}
return mat1;
}
// In-place element-wise (Hadamard) multiplication: mat1[i] *= mat2[i].
// Note this is NOT matrix multiplication.
template <typename T>
matrix<T> & operator*=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] *= m2ptr[i];
}
return mat1;
}
// In-place element-wise division: mat1[i] /= mat2[i].
// No check for zero divisors is performed.
template <typename T>
matrix<T> & operator/=(matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
m1ptr[i] /= m2ptr[i];
}
return mat1;
}
// Element-wise compound assignment, matrix op scalar.  Applies the
// operation with VAL to every element of MAT in place, in parallel.
// In-place scalar addition: mat[i] += val.
template <typename T, typename S>
matrix<T> & operator+=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] += val;
}
return mat;
}
// In-place scalar subtraction: mat[i] -= val.
template <typename T, typename S>
matrix<T> & operator-=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] -= val;
}
return mat;
}
// In-place scalar multiplication: mat[i] *= val.
template <typename T, typename S>
matrix<T> & operator*=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] *= val;
}
return mat;
}
// In-place scalar division: mat[i] /= val.  No zero check.
template <typename T, typename S>
matrix<T> & operator/=(matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
mptr[i] /= val;
}
return mat;
}
// Unary minus: returns a new matrix of the same size whose every
// element is the arithmetic negation of the corresponding element of MAT.
template <typename T>
matrix<T> operator-(const matrix<T> &mat)
{
matrix<T> out(mat.size());
const unsigned int count = mat.elem();
const T *src = mat.ptr();
T *dst = out.ptr();
#pragma omp parallel for simd
for (unsigned int idx = 0; idx < count; idx++) {
dst[idx] = -src[idx];
}
return out;
}
// Element-wise binary operators, matrix op matrix.  Each checks the two
// matrices have the same size (throwing std::runtime_error otherwise)
// and returns a newly allocated result, filled in parallel.
// Element-wise sum: ret[i] = mat1[i] + mat2[i].
template <typename T>
matrix<T> operator+(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] + m2ptr[i];
}
return ret;
}
// Element-wise difference: ret[i] = mat1[i] - mat2[i].
template <typename T>
matrix<T> operator-(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] - m2ptr[i];
}
return ret;
}
// Element-wise (Hadamard) product: ret[i] = mat1[i] * mat2[i].
// Note this is NOT matrix multiplication.
template <typename T>
matrix<T> operator*(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] * m2ptr[i];
}
return ret;
}
// Element-wise quotient: ret[i] = mat1[i] / mat2[i].  No zero check.
template <typename T>
matrix<T> operator/(const matrix<T> &mat1, const matrix<T> &mat2)
{
if (mat1.size() != mat2.size()) {
throw std::runtime_error("dimension mismatch");
}
unsigned int elem = mat1.elem();
matrix<T> ret(mat1.size());
T *rptr = ret.ptr();
const T *m1ptr = mat1.ptr();
const T *m2ptr = mat2.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = m1ptr[i] / m2ptr[i];
}
return ret;
}
// Element-wise binary operators, matrix op scalar.  Each returns a
// newly allocated result of the same size, filled in parallel.
// ret[i] = mat[i] + val.
template <typename T, typename S>
matrix<T> operator+(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] + val;
}
return ret;
}
// ret[i] = mat[i] - val.
template <typename T, typename S>
matrix<T> operator-(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] - val;
}
return ret;
}
// ret[i] = mat[i] * val.
template <typename T, typename S>
matrix<T> operator*(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] * val;
}
return ret;
}
// ret[i] = mat[i] / val.  No zero check.
template <typename T, typename S>
matrix<T> operator/(const matrix<T> &mat, const S &val)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = mptr[i] / val;
}
return ret;
}
// Scalar-on-the-left overloads.  Addition and multiplication are
// commutative and forward to the matrix-on-the-left versions;
// subtraction needs its own loop.
// ret[i] = val + mat[i].
template <typename T, typename S>
matrix<T> operator+(const S &val, const matrix<T> &mat)
{
return mat + val;
}
// ret[i] = val - mat[i].
template <typename T, typename S>
matrix<T> operator-(const S &val, const matrix<T> &mat)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = val - mptr[i];
}
return ret;
}
// ret[i] = val * mat[i].
template <typename T, typename S>
matrix<T> operator*(const S &val, const matrix<T> &mat)
{
return mat * val;
}
// ret[i] = val / mat[i].  No check for zero elements is performed.
// Consistency fix: every other operator in this header constructs the
// result from mat.size(); the original built it from (rows, cols),
// which is equivalent but gratuitously different.
template <typename T, typename S>
matrix<T> operator/(const S &val, const matrix<T> &mat)
{
unsigned int elem = mat.elem();
matrix<T> ret(mat.size());
T *rptr = ret.ptr();
const T *mptr = mat.ptr();
#pragma omp parallel for simd
for (unsigned int i = 0; i < elem; i++) {
rptr[i] = val / mptr[i];
}
return ret;
}
}
#endif
|
Structures.h | /// \ingroup base
//
/// \class ttk::FTMTree_MT
/// \author Charles Gueunet <charles.gueunet@lip6.fr>
/// \date September 2016.
///
///\brief TTK structures for the contour tree
#ifndef STRUCTURES_H
#define STRUCTURES_H
#include <forward_list>
#include <iterator>
#include <memory>
#include <vector>
#include <boost/heap/fibonacci_heap.hpp>
#include "AtomicVector.h"
#include "FTMTree_DataTypes.h"
// todo remove
#include<iostream>
namespace ttk
{
namespace ftm
{
// Compute parameters (global)
struct Params {
TreeType treeType;
bool segm = true;
bool normalize = true;
bool advStats = true;
int samplingLvl = 0;
};
#ifdef TTK_ENABLE_FTM_TREE_STATS_TIME
// Timing record for one growth task (stats builds only).
struct ActiveTask {
float begin = -1;               // task start time; -1 = not started
float end = -1;                 // task end time; -1 = not finished
SimplexId origin = nullVertex;  // seed vertex of the task
};
#endif
// Scalar related containers (global): raw scalar/offset buffers plus the
// sorted-order lookup used to compare vertices by scalar value.
struct Scalars {
SimplexId size;
void* values;
void* offsets;
std::shared_ptr<std::vector<SimplexId>> sortedVertices, mirrorVertices;
// Need vertices to be sorted : use mirrorVertices.
// The four comparators below order vertices by their rank in the
// sorted order (mirrorVertices[v] is v's position in sortedVertices).
bool isLower(SimplexId a, SimplexId b) const
{
return (*mirrorVertices)[a] < (*mirrorVertices)[b];
}
bool isEqLower(SimplexId a, SimplexId b) const
{
return (*mirrorVertices)[a] <= (*mirrorVertices)[b];
}
bool isHigher(SimplexId a, SimplexId b) const
{
return (*mirrorVertices)[a] > (*mirrorVertices)[b];
}
bool isEqHigher(SimplexId a, SimplexId b) const
{
return (*mirrorVertices)[a] >= (*mirrorVertices)[b];
}
Scalars()
: size(0),
values(nullptr),
offsets(nullptr),
sortedVertices(nullptr),
mirrorVertices(nullptr)
{
}
// Heavy
// Copying only duplicates pointers/shared_ptrs, not the underlying
// buffers; still flagged as a performance hazard by the authors.
Scalars(const Scalars& o)
: size(o.size),
values(o.values),
offsets(o.offsets),
sortedVertices(o.sortedVertices),
mirrorVertices(o.mirrorVertices)
{
std::cout << "copy in depth, bad perfs" << std::endl;
}
// Sort
// Task-parallel quicksort of arr[begin..stop] (inclusive bounds) using
// comparator COMP.  NOTE(review): the omp task pragmas assume this is
// called from inside an existing parallel region (e.g. under a single
// construct) -- confirm at call sites; outside one the tasks simply run
// serially.
template <typename type>
void qsort(type arr[], const long int begin, const long int stop,
std::function<bool(type, type)> comp) const
{
if (begin >= stop)
return;
// Below this partition size, spawning a task is not worth the overhead.
static const long int MINSIZE = 10;
long int left = begin - 1;
long int right = stop + 1;
const type pivot = arr[begin];
// Hoare-style partition around the first element.
while (1) {
while (comp(pivot, arr[--right]))
;
while (++left <= stop && !comp(pivot, arr[left]))
;
if (left < right)
swap_el<type>(arr, left, right);
else
break;
}
swap_el<type>(arr, begin, right);
#ifdef TTK_ENABLE_OPENMP
#pragma omp task untied if (right - begin > MINSIZE)
#endif
qsort(arr, begin, right - 1, comp);
#ifdef TTK_ENABLE_OPENMP
#pragma omp task untied if (stop - right > MINSIZE)
#endif
qsort(arr, right + 1, stop, comp);
}
private:
// Swap arr[a] and arr[b].
template <typename type>
static void swap_el(type arr[], const size_t a, const size_t b)
{
const type tmp = arr[a];
arr[a] = arr[b];
arr[b] = tmp;
}
};
// Per-task growth state: the current vertex and a priority queue
// (fibonacci heap, ordered by the supplied vertex comparator) of
// vertices still to be propagated.
struct CurrentState {
SimplexId vertex;
boost::heap::fibonacci_heap<SimplexId, boost::heap::compare<VertCompFN>> propagation;
CurrentState(SimplexId startVert, VertCompFN vertComp)
: vertex(startVert), propagation(vertComp)
{
}
CurrentState(VertCompFN vertComp)
: vertex(nullVertex), propagation(vertComp)
{
// will need to use setStartVert before use
}
void setStartVert(const SimplexId v)
{
vertex = v;
}
// Pop and return the heap's top vertex; precondition: heap not empty.
SimplexId getNextMinVertex(void)
{
vertex = propagation.top();
propagation.pop();
return vertex;
}
// Queue vertex V for propagation.
void addNewVertex(const SimplexId v)
{
propagation.emplace(v);
}
// Steal the other state's queue and reposition on the new top.
void merge(CurrentState& other)
{
propagation.merge(other.propagation);
vertex = propagation.top();
}
bool empty()
{
return propagation.empty();
}
// DEBUG ONLY
// Linear scan over the heap; O(n), for debugging only.
bool find(SimplexId v)
{
return std::find(propagation.begin(), propagation.end(), v) != propagation.end();
}
};
// State shared between tasks that grow from the same extremum:
// suspended growth states and arcs left open, both in lock-free
// append-only vectors.
struct SharedData {
SimplexId extrema;
AtomicVector<CurrentState*> states;
AtomicVector<idSuperArc> openedArcs;
explicit SharedData(SimplexId e) : extrema(e), states(50), openedArcs(50)
{
}
// Append a suspended state (atomic slot reservation via getNext).
void addState(CurrentState* curState)
{
const idThread& thisTask = states.getNext();
states[thisTask] = curState;
}
// Record an arc left open by the current task.
void addArc(const idSuperArc arc)
{
idSuperArc thisArc = openedArcs.getNext();
openedArcs[thisArc] = arc;
}
// Absorb the other shared data's states and open arcs.
void merge(SharedData& other)
{
for (auto* state : other.states) {
addState(state);
}
for (auto& arc : other.openedArcs) {
addArc(arc);
}
}
void reserve(const size_t& s)
{
states.reserve(s);
openedArcs.reserve(s);
}
};
// Pair of vertex comparators: ascending and descending scalar order.
struct Comparison {
VertCompFN vertLower, vertHigher;
};
// Shorthands for iterators over the segmentation vertex lists.
using segm_it = std::vector<SimplexId>::iterator;
using segm_rev_it = std::vector<SimplexId>::reverse_iterator;
using segm_const_it = std::vector<SimplexId>::const_iterator;
using segm_const_rev_it = std::vector<SimplexId>::const_reverse_iterator;
// Segmentation data
// Half-open range [segmentBegin, segmentEnd) of vertices of one segment.
struct Region {
// inverted in case of split tree
segm_it segmentBegin;
segm_it segmentEnd;
};
}
}
#endif /* end of include guard: STRUCTURES_H */
|
GB_binop__land_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int8
// A.*B function (eWiseMult): GB_AemultB__land_int8
// A*D function (colscale): GB_AxD__land_int8
// D*A function (rowscale): GB_DxB__land_int8
// C+=B function (dense accum): GB_Cdense_accumB__land_int8
// C+=b function (dense accum): GB_Cdense_accumb__land_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int8
// C=scalar+B GB_bind1st__land_int8
// C=scalar+B' GB_bind1st_tran__land_int8
// C=A+scalar GB_bind2nd__land_int8
// C=A'+scalar GB_bind2nd_tran__land_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B for the LAND_INT8 operator with C, A and B all dense.
// The loop body lives in the included template; GB_DISABLE compiles the
// kernel out (returning GrB_NO_VALUE) when this operator/type is
// disabled via GB_control.h.
GrB_Info GB_Cdense_ewise3_noaccum__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C using the
// LAND_INT8 operator.  The slice arrays partition B's entries into
// 'ntasks' balanced tasks for 'nthreads' threads; the loop body is in
// the included template.
GrB_Info GB_Cdense_accumB__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
    #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with LAND_INT8.
// p_bwork points at the scalar, passed as untyped GB_void.
GrB_Info GB_Cdense_accumb__land_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
    // get the scalar b for C += b, of type int8_t
    int8_t bwork = (*((int8_t *) p_bwork)) ;
    #include "GB_dense_subassign_22_template.c"
    return (GrB_SUCCESS) ;
    }
    // unreachable (the inner block already returned); kept as emitted by
    // the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying
// the LAND_INT8 operator entrywise.  The *_is_pattern flags tell the
// template to treat that operand as structural (values ignored).
GrB_Info GB_AxD__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// LAND_INT8 operator entrywise.
GrB_Info GB_DxB__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or masked C<M> = A+B) with the LAND_INT8 operator,
// applied on the union of the patterns of A and B.  The C_to_* arrays
// map C's vectors to those of M, A and B; TaskList partitions the work.
GrB_Info GB_AaddB__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (or masked C<M> = A.*B) with the LAND_INT8
// operator, applied on the intersection of the patterns of A and B.
GrB_Info GB_AemultB__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x && Bx [p]) for 0 <= p < anz: apply LAND with the scalar
// bound as the first operand.  The operation is purely elementwise, so
// the permitted aliasing of Cx and Bx is harmless.
GrB_Info GB_bind1st__land_int8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t xval = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ((xval != 0) && (Bx [p] != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] && y) for 0 <= p < anz: apply LAND with the scalar
// bound as the second operand.  Cx and Ax may alias (elementwise op).
GrB_Info GB_bind2nd__land_int8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t yval = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = ((Ax [p] != 0) && (yval != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LAND_INT8 with the scalar bound
// as the first operand.  The transpose loop lives in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__land_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro in this file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LAND_INT8 with the scalar bound
// as the second operand (uses the GB_CAST_OP defined just above).
GrB_Info GB_bind2nd_tran__land_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
target.c | #include <stdio.h>
#include <stdlib.h>
extern void foo();
/* Driver: call foo() (defined in rose_foo.c), then run a six-iteration
   OpenMP target + parallel-for loop, printing one line per iteration.
   The commented block below is the reference transcript of the lowered
   runtime call produced by the source-to-source translator. */
int main(int argc, char** argv) {
    int iter;
    foo(); // call the followed code block in rose_foo.c
    /*
    {
    int _threads_per_block_ = 1024;
    int _num_blocks_ = 256;
    int64_t __device_id = 0;
    void *__host_ptr = (void *)OUT__2__9009__main__7__id__;
    void *__args_base[] = {};
    void *__args[] = {};
    int64_t __arg_sizes[] = {};
    int64_t __arg_types[] = {};
    int32_t __arg_num = 0;
    __tgt_target_teams(__device_id,__host_ptr,__arg_num,__args_base,__args,__arg_sizes,__arg_types,_threads_per_block_,_num_blocks_);
    }
    */
#pragma omp target
#pragma omp parallel for
    for (iter = 0; iter < 6; ++iter) {
        printf("Test 2.\n");
    }
    return 0;
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMCHARS 37
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of 'filename' into a linked list of strings.
   Aborts through file_error() if the file cannot be opened. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    list *lines = make_list();
    char *line;
    while ((line = fgetl(fp)) != NULL) {
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Pick n paths that advance sequentially through the data set: each of
   the mini_batch "time lines" starts at a random index and steps forward
   by a random speed in [1, augment_speed], so consecutive samples of a
   batch lane come from consecutive frames (used for tracking).
   Thread-safe via the module-level mutex; the returned array (but not
   the strings, which alias paths[]) must be freed by the caller. */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    /* one random starting frame per batch lane */
    unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            sequentia_paths[i] = paths[index];
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
            /* NOTE(review): if a path is empty this retries forever with
               the same lane advancing by 'speed' — confirm inputs are
               never empty strings. */
        } while (strlen(sequentia_paths[i]) == 0);
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}
/* Pick n paths uniformly at random (with replacement) from
   paths[0..m-1].  Thread-safe via the module mutex.  The returned array
   must be freed by the caller; the strings alias paths[] and are not
   copied.  Empty paths are re-rolled; suspiciously short ones warned. */
char **get_random_paths(char **paths, int n, int m)
{
    char **picked = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for (i = 0; i < n; ++i) {
        do {
            int idx = random_gen() % m;
            picked[i] = paths[idx];
            if (strlen(picked[i]) <= 4) printf(" Very small path to the image: %s \n", picked[i]);
        } while (strlen(picked[i]) == 0);
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
/* Apply a find/replace substitution to each of the n paths, returning a
   freshly allocated array of freshly allocated strings. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = (char**)calloc(n, sizeof(char*));
    int i;
    for (i = 0; i < n; ++i) {
        char buf[4096];
        find_replace(paths[i], find, replace, buf);
        out[i] = copy_string(buf);
    }
    return out;
}
/* Load n images, convert each to grayscale, and pack them as rows of a
   matrix (one flattened image per row; X.cols reflects the last image). */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    int idx;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for (idx = 0; idx < n; ++idx) {
        image color = load_image(paths[idx], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);
        X.vals[idx] = gray.data;
        X.cols = gray.h*gray.w*gray.c;
    }
    return X;
}
/* Load n color images resized to w-by-h and pack them as rows of a
   matrix (one flattened image per row). */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    int idx;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for (idx = 0; idx < n; ++idx) {
        image im = load_image_color(paths[idx], w, h);
        X.vals[idx] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n images and apply random augmentation to each: random
   crop/rotate/scale, optional horizontal flip, then HSV distortion.
   Each augmented image becomes one row of the returned matrix. */
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    matrix X;
    int idx;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for (idx = 0; idx < n; ++idx) {
        image orig = load_image_color(paths[idx], 0, 0);
        image crop = random_augment_image(orig, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);
        free_image(orig);
        X.vals[idx] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}
extern int check_mistakes;
box_label *read_boxes(char *filename, int *n)
{
box_label* boxes = (box_label*)calloc(1, sizeof(box_label));
FILE *file = fopen(filename, "r");
if (!file) {
printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
//file_error(filename);
FILE* fw = fopen("bad.list", "a");
fwrite(filename, sizeof(char), strlen(filename), fw);
char *new_line = "\n";
fwrite(new_line, sizeof(char), strlen(new_line), fw);
fclose(fw);
if (check_mistakes) getchar();
*n = 0;
return boxes;
}
float x, y, h, w;
int id;
int count = 0;
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
boxes = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label));
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle the n boxes in place by swapping each element with one at a
   randomly chosen index (uses the project RNG). */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        box_label tmp = b[i];
        int j = random_gen()%n;
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Map n boxes from original-image coordinates into the cropped, scaled
   and optionally flipped frame described by (dx, dy, sx, sy, flip),
   clamping the results to [0,1] and recomputing center/size from the
   transformed edges.  Boxes at exactly (0,0) — the "empty slot" marker —
   and boxes whose center falls outside the image are invalidated by
   setting every field to the 999999 sentinel. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        /* (0,0) center marks an unused label slot */
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        /* box entirely outside the unit image: invalidate */
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        /* affine transform of the edges into the crop frame */
        boxes[i].left = boxes[i].left * sx - dx;
        boxes[i].right = boxes[i].right * sx - dx;
        boxes[i].top = boxes[i].top * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        boxes[i].left = constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top = constrain(0, 1, boxes[i].top);
        boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
        /* rebuild center/size from the clamped edges */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Fill truth[] with up to 30 boxes for the "swag" detector layout:
   each box occupies (4+classes) floats — x, y, w, h, then a one-hot
   class vector.  The boxes come from the label file paired with 'path'
   and are transformed with (dx, dy, sx, sy, flip) first. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 30; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* NOTE(review): '.0' is 0.0, so only negative sizes are skipped;
           fill_truth_region uses .001 — confirm whether zero-sized boxes
           should really be kept here. */
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/* Fill truth[] for a region (grid) detector: the image is divided into
   num_boxes x num_boxes cells; each cell holds (5+classes) floats —
   objectness, one-hot class vector, then x, y (relative to the cell),
   w, h.  Only the first box landing in a cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* skip degenerate (near-zero) boxes */
        if (w < .001 || h < .001) continue;
        /* grid cell containing the box center */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        /* coordinates relative to that cell */
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;
        truth[index++] = 1;
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Fill truth[] (num_boxes slots of {x,y,w,h,id}) from the label file
   paired with 'path', after applying the crop/flip transform
   (dx, dy, sx, sy, flip) that was applied to the image.  Invalid
   annotations are reported, logged to bad_label.list via the shell, and
   skipped (the 'sub' counter keeps truth[] densely packed).  Boxes
   smaller than one pixel at the net_w x net_h input size are dropped.
   Fix: the shell commands are now built with snprintf — labelpath can
   be up to 4096 bytes, so the old sprintf into buff[256] could smash
   the stack. */
void fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;  // number of boxes skipped so far, to compact truth[]
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[256];
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1));
            // NOTE(review): labelpath is interpolated unquoted into a
            // shell command; confirm label paths are trusted input.
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        if (x == 999999 || y == 999999) {  // sentinel set by correct_boxes()
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f \n", w);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f \n", h);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        // nudge exact-zero centers inside the image by one pixel
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}
/* Decode and print an n-character prediction: each character is the
   argmax over its NUMCHARS-wide slice of pred. */
void print_letters(float *pred, int n)
{
    int pos;
    for (pos = 0; pos < n; ++pos) {
        int best = max_index(pred + pos*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* One-hot encode the captcha text embedded in the file name (the part
   after the last '/', up to the first '.') into truth[n*NUMCHARS].
   Positions past the text are filled with the last class (NUMCHARS-1).
   Fixes: paths without any '/' previously caused undefined behavior
   (++NULL); strlen was re-evaluated every loop iteration. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    begin = begin ? begin + 1 : path;  // fall back to whole string if no '/'
    int i;
    const int len = (int)strlen(begin);  // hoisted loop-invariant
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        // NOTE(review): the write still happens for index > 35 (slot 36
        // is presumably the "blank" class) — confirm alphanum_to_int
        // never returns >= NUMCHARS.
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/* Load n captcha images (optionally sampled at random from m paths)
   together with their k-character one-hot labels. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int row;
    for (row = 0; row < n; ++row) {
        fill_truth_captcha(paths[row], k, d.y.vals[row]);
    }
    if (m) free(paths);
    return d;
}
/* Load images for the captcha autoencoder: the target y is the input X
   itself (an alias, not a copy).
   NOTE(review): X.cols is forced to the magic value 17100 — confirm it
   equals w*h*3 for the caller's dimensions.
   NOTE(review): d.y aliases d.X while d.shallow == 0, so free_data()
   would free the same buffers twice — confirm callers never deep-free. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/* One-hot encode which of the k label strings occurs as a substring of
   'path'.  truth[] is zeroed first; a warning is printed unless exactly
   one label matched. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    int hits = 0;
    memset(truth, 0, k*sizeof(float));
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            truth[i] = 1;
            ++hits;
        }
    }
    if (hits != 1) printf("Too many or too few labels: %d, %s\n", hits, path);
}
/* Propagate a one-hot truth vector up a label hierarchy: every ancestor
   of a set label is also set; then, for each sibling group that contains
   no set label, every member is marked with SECRET_NUM (presumably a
   "don't care" value ignored by the loss — TODO confirm). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    /* set all ancestors of every active label */
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;  /* running offset of the current group in truth[] */
    for(j = 0; j < hierarchy->groups; ++j){
        int mask = 1;  /* stays 1 when no member of the group is active */
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Build the n-by-k classification ground-truth matrix: one row per
   path, one-hot over the k labels, optionally propagated up a label
   hierarchy.  If labels is NULL the matrix stays all-zero. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int row;
    if (labels) {
        for (row = 0; row < n; ++row) {
            fill_truth(paths[row], labels, k, y.vals[row]);
            if (hierarchy) {
                fill_hierarchy(y.vals[row], k, hierarchy);
            }
        }
    }
    return y;
}
/* Build an n-by-k multi-label tag matrix: for each image path, derive
   the tag-file path (imgs -> labels, _iconl.jpeg -> .txt, with labels2
   as a fallback directory) and set y[i][tag]=1 for every tag id < k
   read from it.  Prints how many tag files were found.
   NOTE(review): find_replace is called with 'label' as both input and
   output — confirm it buffers internally and tolerates aliasing. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;  /* number of paths with a readable tag file */
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            /* fallback: same file under the labels2 directory */
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}
char **get_labels_custom(char *filename, int *size)
{
list *plist = get_paths(filename);
if(size) *size = plist->size;
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
/* Convenience wrapper around get_labels_custom() for callers that do
   not need the label count. */
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}
/* Release a data struct: deep-free both matrices unless it is a shallow
   copy, in which case only the row-pointer arrays are freed (the rows
   are owned elsewhere). */
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Load n randomly chosen images with random crop/flip/HSV augmentation
   and build region-grid truth for each (see fill_truth_region).  The
   same crop transform applied to the image is passed to the truth
   filler so labels stay aligned with pixels. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    /* one grid cell holds objectness + class one-hot + box */
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        /* random crop offsets, up to +/- jitter of each dimension */
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        /* normalized offsets of the crop inside the original image */
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load n image pairs (each pair stacked channel-wise into one row of X)
   plus a per-class comparison target: for each class, 1/0 if the first
   image clearly wins, 0/1 if the second wins, SECRET_NUM/SECRET_NUM
   when ambiguous.  Per-image IoU scores come from the "labels" files
   derived from the image paths.
   Fix: both label FILE*s are now checked — a missing label file used to
   make fscanf/fclose dereference NULL. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;  // two 3-channel images per row
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if (fp1) {  // previously used unchecked
            // keep the best IoU seen per class for image 1
            while(fscanf(fp1, "%d %f", &id, &iou) == 2){
                if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
            }
            fclose(fp1);
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if (fp2) {
            // best IoU per class for image 2 (odd columns)
            while(fscanf(fp2, "%d %f", &id, &iou) == 2){
                if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
            }
            fclose(fp2);
        }
        // binarize: clear winner -> 1/0, otherwise mark "don't care"
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE randomly chosen image (despite the n-element path array) at
   its native size, apply a random crop and optional flip, and build the
   matching 30-box "swag" truth row (see fill_truth_swag). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    /* 30 box slots of (x,y,w,h + class one-hot) each */
    int k = (4+classes)*30;
    d.y = make_matrix(1, k);
    /* random crop, up to +/- jitter of each dimension */
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    /* normalized crop offsets, used to realign the labels */
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/* Read a box stored as x,y,w,h at the given element stride in f[]. */
static box float_to_box_stride(float *f, int stride)
{
    box b = { 0 };
    b.x = f[0 * stride];
    b.y = f[1 * stride];
    b.w = f[2 * stride];
    b.h = f[3 * stride];
    return b;
}
/* Append boxes from old_truth after those already present in new_truth,
   stopping when the buffer is full or old_truth runs out.  A truth slot
   (5 floats: x,y,w,h,id) counts as "present" while its x is non-zero. */
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;
    int existing = 0;
    int t;
    /* count the boxes already stored in new_truth */
    while (existing < boxes && new_truth[existing*t_size] != 0) {
        ++existing;
    }
    /* copy old boxes into the remaining slots */
    for (t = existing; t < boxes; ++t) {
        float *dst = new_truth + t*t_size;
        const float *src = old_truth + (t - existing)*t_size;
        if (src[0] == 0) break;
        int j;
        for (j = 0; j < t_size; ++j) {
            dst[j] = src[j];
        }
    }
}
#ifdef OPENCV
#include "http_stream.h"
/* OpenCV build of the detection data loader: pick n paths (sequential
   when tracking, else random), load each image, apply a randomized (or
   per-track cached) crop/flip/HSV/blur augmentation, optionally blend a
   second "mixup" image and its labels, and fill one truth row of
   5*boxes floats per sample.  Returns a deep-owned data struct.
   Fix: the per-sample 'truth' buffer was leaked when the image failed
   to load (early 'continue'); it is now freed on that path. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
    float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;  // default to 3 channels
    char **random_paths;
    char **mixup_random_paths = NULL;
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    int mixup = use_mixup ? random_gen() % 2 : 0;
    if (mixup) {
        // a second, independent set of images to blend with the first
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    // cached augmentation parameters (reused across a track's samples)
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;  // fresh params for the 2nd pass
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                free(truth);  // fix: was leaked on this early-continue path
                if (check_mistakes) getchar();
                continue;
            }
            int oh = get_height_mat(src);
            int ow = get_width_mat(src);
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            if (!augmentation_calculated || !track)
            {
                // draw one set of augmentation parameters; when tracking,
                // reuse it for the whole sequence
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
                blur = rand_int(0, 1) ? (use_blur) : 0;
            }
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                // widen the crop so the image keeps its aspect ratio when
                // letterboxed into the w x h network input
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            // normalized crop offsets, so labels follow the crop
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, d.y.vals[i]);
            if (i_mixup) {
                // blend the new image 50/50 with the first-pass image and
                // append the first-pass labels after the new ones
                image old_img = ai;
                old_img.data = d.X.vals[i];
                blend_images_cv(ai, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }
            d.X.vals[i] = ai.data;
            memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float));
            if (show_imgs)
            {
                // debug: draw the truth boxes on a copy and save/show it
                image tmp_ai = copy_image(ai);
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }
            release_mat(&src);
            free(truth);
        }
    }
    free(random_paths);
    if(mixup_random_paths) free(mixup_random_paths);
    return d;
}
#else // OPENCV
/* Weighted pixel-wise blend, in place: new_img = alpha*new_img + beta*old_img.
 * Both images must share the same w/h/c layout. */
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    const int num_pixels = new_img.w * new_img.h * new_img.c;
    int k;
    #pragma omp parallel for
    for (k = 0; k < num_pixels; ++k) {
        new_img.data[k] = alpha * new_img.data[k] + beta * old_img.data[k];
    }
}
/* Load one batch of n detection samples (non-OpenCV build).
 * Per sample: load the image, apply a random jittered crop (optionally
 * aspect-corrected for letterboxing), resize to (w,h), optionally flip and
 * HSV-distort, and remap ground-truth boxes into the augmented frame.
 * With use_mixup, a second pass blends a second image/labels 50/50 into each
 * sample. With track != 0 the same augmentation parameters are reused across
 * the mini-batch so sequential video frames are augmented identically.
 * Returns data d: X rows are h*w*c pixel buffers, y rows hold 5 floats per box. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
    float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;  /* default to 3-channel images */
    char **random_paths;
    char **mixup_random_paths = NULL;
    /* track: sequential frames of one clip; otherwise uniform random sampling */
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    /* When enabled, MixUp is applied to a random half of the batches. */
    int mixup = use_mixup ? random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = { 0 };
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    /* Shared augmentation draws; kept across iterations when track != 0. */
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5 * boxes);  /* 5 floats per box: x, y, w, h, class */
    int i_mixup = 0;
    /* Pass 0 fills the batch; pass 1 (if mixup) blends a second image per sample. */
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;  /* re-roll for the mixup pass */
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            image orig = load_image(filename, 0, 0, c);
            int oh = orig.h;
            int ow = orig.w;
            int dw = (ow*jitter);  /* maximum per-side crop jitter, in pixels */
            int dh = (oh*jitter);
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
            }
            /* Per-border crop offsets derived from the shared random draws. */
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                /* Grow the crop along one axis so its aspect ratio matches the
                 * network input, emulating letterboxing without padding. */
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }
            int swidth = ow - pleft - pright;   /* crop size in source pixels */
            int sheight = oh - ptop - pbot;
            float sx = (float)swidth / ow;      /* crop size relative to source */
            float sy = (float)sheight / oh;
            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
            float dx = ((float)pleft / ow) / sx;  /* box-coordinate shift from the crop */
            float dy = ((float)ptop / oh) / sy;
            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);
            /* Load the labels and remap them into the cropped/flipped frame. */
            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (i_mixup) {
                /* Blend 50/50 with the pass-0 image and merge the label lists. */
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);  /* frees the pass-0 pixel buffer */
            }
            d.X.vals[i] = sized.data;  /* pixel ownership moves into d */
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            if (show_imgs)// && i_mixup)
            {
                /* Debug visualization: draw the remapped boxes, save, optionally show. */
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;  /* truth entries are packed; first empty box ends the list */
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }
            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif // OPENCV
/* Worker-thread entry point: loads one chunk of data described by the
 * load_args pointed to by ptr, dispatching on a.type, and stores the result
 * through the output pointers inside the args (a.d, or a.im/a.resized for
 * single-image types). Takes ownership of ptr (heap-allocated by
 * load_data_in_thread / load_data) and frees it before returning. */
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    /* Zero means "unset": fall back to neutral augmentation factors. */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter,
            a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        /* Single image: plain resize to the network input size. */
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        /* Single image: aspect-preserving letterbox to the network input size. */
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);  /* args copy was heap-allocated by the spawner */
    return 0;
}
/* Spawn one worker thread running load_thread on a heap copy of `args`
 * (the copy outlives this stack frame; load_thread frees it).
 * Returns the thread handle; the caller must pthread_join it. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
    /* Bug fix: a failed allocation previously dereferenced NULL on the next line. */
    if (!ptr) error("Memory allocation failed");
    *ptr = args;
    if (pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}
/* Fan-out loader: splits the requested args.n samples across args.threads
 * worker threads (each spawned via load_data_in_thread), joins them,
 * concatenates the per-thread results into *args.d, then frees the partial
 * buffers. Takes ownership of ptr and frees it. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)calloc(args.threads, sizeof(data));
    pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* Partition `total` without rounding loss:
         * thread i loads [i*total/threads, (i+1)*total/threads). */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;  /* the concatenated result owns the rows */
    /* concat_datas copied only row pointers; mark buffers shallow so
     * free_data releases just the pointer arrays, not the rows now in *out. */
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/* Spawn the multi-threaded batch loader (load_threads) for `args`.
 * The args are copied to the heap so they outlive this frame; load_threads
 * frees the copy. The caller must pthread_join the returned handle. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
    /* Bug fix: a failed allocation previously dereferenced NULL on the next line. */
    if (!ptr) error("Memory allocation failed");
    *ptr = args;
    if (pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}
/* Paired image -> label-image dataset: X from `paths` resized to (w,h),
 * y from the matching "-label.png" files loaded as grayscale at
 * (out_w,out_h). m != 0 first samples n random paths from the pool. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if (m) paths = get_random_paths(paths, n, m);
    char **label_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);
    if (m) free(paths);
    int idx;
    for (idx = 0; idx < n; ++idx) {
        free(label_paths[idx]);
    }
    free(label_paths);
    return d;
}
/* Plain classification loader (no augmentation): images resized to (w,h),
 * labels over k classes. m != 0 first samples n random paths. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    data out = {0};
    out.shallow = 0;
    if (m) paths = get_random_paths(paths, n, m);
    out.X = load_image_paths(paths, n, w, h);
    out.y = load_labels_paths(paths, n, labels, k, 0);
    if (m) free(paths);
    return out;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Super-resolution training pairs: y = a random (w*scale, h*scale) crop,
 * optionally flipped; X = the same crop downscaled to (w,h). Pixel buffers
 * are handed over to the matrices, so only the full image is freed here. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if (m) paths = get_random_paths(paths, n, m);
    data out = {0};
    out.shallow = 0;
    out.X.rows = n;
    out.X.cols = w*h*3;
    out.X.vals = (float**)calloc(n, sizeof(float*));
    out.y.rows = n;
    out.y.cols = w*scale * h*scale * 3;
    out.y.vals = (float**)calloc(n, sizeof(float*));
    int idx;
    for (idx = 0; idx < n; ++idx) {
        image full = load_image_color(paths[idx], 0, 0);
        image hi_res = random_crop_image(full, w*scale, h*scale);
        if (random_gen() % 2) flip_image(hi_res);
        image lo_res = resize_image(hi_res, w, h);
        out.X.vals[idx] = lo_res.data;  /* ownership moves into the matrices */
        out.y.vals[idx] = hi_res.data;
        free_image(full);
    }
    if (m) free(paths);
    return out;
}
/* Augmented classification loader: random crop/aspect/angle/HSV jitter per
 * image; labels may be propagated through a class hierarchy when given. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    data out = {0};
    out.shallow = 0;
    if (m) paths = get_random_paths(paths, n, m);
    out.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    out.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if (m) free(paths);
    return out;
}
/* Tag dataset: augmented images plus tag vectors of size k.
 * d.w/d.h record the network input size alongside the samples. */
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    data out = {0};
    out.shallow = 0;
    out.w = size;
    out.h = size;
    if (m) paths = get_random_paths(paths, n, m);
    out.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    out.y = load_tags_paths(paths, n, k);
    if (m) free(paths);
    return out;
}
/* Shallow vertical concatenation: the result reuses the row pointers of m1
 * followed by those of m2 (no float data is copied). Column count is taken
 * from m1; m2 is assumed to have the same width. */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*));
    int r;
    for (r = 0; r < out.rows; ++r) {
        out.vals[r] = (r < m1.rows) ? m1.vals[r] : m2.vals[r - m1.rows];
    }
    return out;
}
/* Shallow concatenation of two datasets; the result borrows row pointers
 * from both inputs (shallow = 1), so freeing it must not free the rows. */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
/* Fold n datasets into one by repeated shallow concatenation; each step
 * frees only the previous accumulator's pointer arrays (rows are shared).
 * Row order: d[i]'s rows come before everything accumulated so far. */
data concat_datas(data *d, int n)
{
    data merged = {0};
    int idx;
    for (idx = 0; idx < n; ++idx) {
        data next = concat_data(d[idx], merged);
        free_data(merged);
        merged = next;
    }
    return merged;
}
/* Load a CSV into X, remove column `target` from it, and one-hot encode
 * that column into y with k classes. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data out = {0};
    out.shallow = 0;
    matrix features = csv_to_matrix(filename);
    float *raw_labels = pop_column(&features, target);
    matrix onehot;
    onehot.rows = features.rows;
    onehot.cols = k;
    onehot.vals = one_hot_encode(raw_labels, features.rows, k);
    out.X = features;
    out.y = onehot;
    free(raw_labels);
    return out;
}
/* Load one CIFAR-10 binary batch file (10000 records, each 1 label byte +
 * 3072 pixel bytes). Pixels are scaled to [0,1]; labels are one-hot over 10
 * classes. Stops early (remaining rows stay zero) on a truncated file. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        /* Bug fix: the fread result was ignored, so a truncated/corrupt file
         * silently filled rows from uninitialized stack memory. */
        if (fread(bytes, 1, 3073, fp) != 3073) {
            fprintf(stderr, "load_cifar10_data: truncated file %s at record %ld\n", filename, i);
            break;
        }
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Copy n uniformly-random (X,y) row pairs from d into the flat output
 * buffers X and y (sampling with replacement, rows stay paired). */
void get_random_batch(data d, int n, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int pick = random_gen() % d.X.rows;
        memcpy(X + row*d.X.cols, d.X.vals[pick], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[pick], d.y.cols*sizeof(float));
    }
}
/* Copy rows [offset, offset+n) of d into the flat buffers X and y.
 * Caller guarantees offset + n <= d.X.rows. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/* In-place label smoothing: y <- eps/cols + (1 - eps) * y, with eps = 0.1,
 * so each row keeps total mass but no entry is exactly 0 or 1. */
void smooth_data(data d)
{
    int r, k;
    float scale = 1. / d.y.cols;  /* uniform share per class */
    float eps = .1;
    for (r = 0; r < d.y.rows; ++r) {
        for (k = 0; k < d.y.cols; ++k) {
            d.y.vals[r][k] = eps * scale + (1 - eps) * d.y.vals[r][k];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records) from
 * data/cifar/cifar-10-batches-bin/, scale pixels to [0,1] and apply label
 * smoothing. Stops reading a batch early (rows stay zero) if it is truncated. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            /* Bug fix: the fread result was ignored, so a truncated/corrupt
             * file silently filled rows from uninitialized stack memory. */
            if (fread(bytes, 1, 3073, fp) != 3073) {
                fprintf(stderr, "load_all_cifar10: truncated file %s at record %d\n", buff, i);
                break;
            }
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
data load_go(char *filename)
{
FILE *fp = fopen(filename, "rb");
matrix X = make_matrix(3363059, 361);
matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
char *label;
int count = 0;
while((label = fgetl(fp))){
int i;
if(count == X.rows){
X = resize_matrix(X, count*2);
y = resize_matrix(y, count*2);
}
sscanf(label, "%d %d", &row, &col);
char *board = fgetl(fp);
int index = row*19 + col;
y.vals[count][index] = 1;
for(i = 0; i < 19*19; ++i){
float val = 0;
if(board[i] == '1') val = 1;
else if(board[i] == '2') val = -1;
X.vals[count][i] = val;
}
++count;
free(label);
free(board);
}
X = resize_matrix(X, count);
y = resize_matrix(y, count);
data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
fclose(fp);
return d;
}
/* In-place Fisher-Yates shuffle of d's rows, applying the same permutation
 * to X and y so sample/label pairs stay aligned. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        /* Bug fix: the swap index must be drawn from [0, i] inclusive.
         * The previous `% i` excluded i itself, biasing the shuffle
         * (an element could never stay in place at its own step). */
        int index = random_gen()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every element of every X row by s (labels y are untouched). */
void scale_data_rows(data d, float s)
{
    int r;
    for (r = 0; r < d.X.rows; ++r) {
        scale_array(d.X.vals[r], d.X.cols, s);
    }
}
/* Add the constant s to every element of every X row (y is untouched). */
void translate_data_rows(data d, float s)
{
    int r;
    for (r = 0; r < d.X.rows; ++r) {
        translate_array(d.X.vals[r], d.X.cols, s);
    }
}
/* Normalize each X row in place via normalize_array (y is untouched). */
void normalize_data_rows(data d)
{
    int r;
    for (r = 0; r < d.X.rows; ++r) {
        normalize_array(d.X.vals[r], d.X.cols);
    }
}
/* Shallow slice: chunk `part` of `total` near-equal chunks of d's rows.
 * Row pointers are shared with d (shallow = 1) — do not free the rows. */
data get_data_part(data d, int part, int total)
{
    data slice = {0};
    slice.shallow = 1;
    const int x_lo = d.X.rows * part / total;
    const int x_hi = d.X.rows * (part + 1) / total;
    const int y_lo = d.y.rows * part / total;
    const int y_hi = d.y.rows * (part + 1) / total;
    slice.X.rows = x_hi - x_lo;
    slice.y.rows = y_hi - y_lo;
    slice.X.cols = d.X.cols;
    slice.y.cols = d.y.cols;
    slice.X.vals = d.X.vals + x_lo;
    slice.y.vals = d.y.vals + y_lo;
    return slice;
}
/* Shallow random sample (with replacement) of num row pairs from d.
 * Only the pointer arrays are allocated; rows remain owned by d. */
data get_random_data(data d, int num)
{
    data sample = {0};
    sample.shallow = 1;
    sample.X.rows = num;
    sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = (float**)calloc(num, sizeof(float*));
    sample.y.vals = (float**)calloc(num, sizeof(float*));
    int k;
    for (k = 0; k < num; ++k) {
        const int pick = random_gen() % d.X.rows;
        sample.X.vals[k] = d.X.vals[pick];
        sample.y.vals[k] = d.y.vals[pick];
    }
    return sample;
}
/* Split d's rows into {train, test}: test receives fold `part` of `total`
 * (rows [start, end)), train receives the rest. Returns a calloc'd array
 * {train, test}; both are shallow (row pointers shared with d). */
data *split_data(data d, int part, int total)
{
    data* split = (data*)calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    /* Bug fix: train/test were declared uninitialized, leaving any `data`
     * fields beyond those assigned below holding garbage. */
    data train = {0};
    data test = {0};
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
adaptLES.h | //Adapt initial grids
// Coarsen the freshly created grid away from the wall: cells above y = L0/4
// are limited to MAXLEVEL-1 and cells above y = 200 to MAXLEVEL-2
// (Basilisk `unrefine` applied to all fields).
// NOTE(review): y > 200 is an absolute height while y > L0/4 scales with the
// domain — confirm both thresholds are intended for this domain size.
void adaptLESinit(int MAXLEVEL)
{
  unrefine(y>L0/4 && level>=(MAXLEVEL-1),all);
  unrefine(y>200 && level>=(MAXLEVEL-2),all);
}
//Adapt grid according to solution while running:
//SGS-flux is compared to resolved-flux to check if the solution is really a LES. If not -> REFINE
//If the SGS-flux is neglegible to resolved flux we are not taking full benefit from LES -> Coarsen
//When fluxes vanish (laminarization) check if Ed.vis < Mol.vis
//Alse check if there is any resolved flux otherwise i am not sure what to do
/* Run-time LES grid adaptation: at each height yc[k], average the resolved
 * flux proxy (fmr) and SGS flux proxy (fms) over an x-z plane of 2^lev[k]
 * sample points. If the SGS flux is negligible (< cur * resolved) the layer
 * is a coarsening candidate; if it dominates (> cr * resolved) the layer is
 * refined. */
void adaptLESrun(int MAXLEVEL, vector u, scalar T, scalar Evis, int lev[], double yc[])
{
  fprintf(ferr,"hoi\n");
  double cur = 0.1, cr = 0.5;
  scalar fmr[], fms[], uh[];
  foreach()
  {
    uh[] = sqrt(sq(u.x[]) + sq(u.z[]));
    /* Resolved vertical flux proxy: |v * d(uh)/dy| (central difference). */
    fmr[] = sqrt(sq(u.y[]*(uh[0,1,0] - uh[0,-1,0])/Delta));
    /* SGS flux proxy: |Evis * d2(uh)/dy2|. */
    fms[] = sqrt(sq(Evis[]*(uh[0,1,0] - (2*uh[]) + uh[0,-1,0])/(sq(Delta))));
  }
  fprintf(ferr,"hoi\n");
  int k = 0;
  double xp, zp, Url[1<<MAXLEVEL];
  int uri = 0;
  while (yc[k] != 0)
  {
    /* Bug fix: removed the original `#pragma omp parallel for reduction(+:F)`
     * here — it referenced a nonexistent variable F and did not precede a
     * for loop, which is invalid OpenMP. */
    double afmr = 0, afms = 0;
    double yp = yc[k];
    for (int i = 0; i < pow(2,lev[k]); i++)
    {
      xp = X0 + L0/((pow(2,lev[k]+1)) + i*(L0/(pow(2,lev[k]))));
      for (int j = 0; j < pow(2,lev[k]); j++)
      {
        /* Bug fix: zp was computed from the x index `i`; sample along z
         * with the inner index `j`. */
        zp = Z0 + L0/((pow(2,lev[k]+1)) + j*(L0/(pow(2,lev[k]))));
        Point point = locate (xp, yp, zp);
        afmr += fmr[];
        afms += fms[];
      }
    }
    if (cur * afmr > afms)
    {
      Url[uri] = yc[k];
      if (uri > 0)
      {
        if ((Url[uri] - Url[uri-1]) == (L0/(pow(2,lev[k]))))
          /* Bug fix: midpoint of the two adjacent coarsenable heights; the
           * original (Url[uri]+Url[uri])/2 reduced to Url[uri] itself. */
          unrefine(y == ((Url[uri] + Url[uri-1]) / 2), all);
      }
      uri++;
    }
    else if (cr * afmr < afms && (afmr/pow(2,2*lev[k])) > 0.01)
    {
      /* NOTE(review): comparing the coordinate y against the integer level
       * lev[k] looks wrong — presumably this should target the height yc[k]
       * (i.e. yp). Left unchanged pending confirmation. */
      refine(y>(lev[k]-0.1) && y<(lev[k]+0.1),all);
    }
    k++;
    boundary(all);
  }
}
|
declare_variant_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// expected-no-diagnostics
int foo(void);
#pragma omp declare variant(foo) match(xxx={}, yyy={ccc})
#pragma omp declare variant(foo) match(xxx={vvv})
#pragma omp declare variant(foo) match(implementation={vendor(llvm)})
#pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx})
#pragma omp declare variant(foo) match(implementation={vendor(unknown)})
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)})
int bar(void);
// CHECK: int foo();
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5):ibm, xxx)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)})
// CHECK-NEXT: int bar();
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
* \param track_branch_features Whether to keep track of ancestors of leaf nodes
* \param is_linear Whether the tree has linear models at each leaf
*/
explicit Tree(int max_leaves, bool track_branch_features, bool is_linear);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
virtual ~Tree() noexcept = default;
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get parent of specific leaf*/
inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; }
/*! \brief Get feature of specific split (original feature index)*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature_inner(int split_idx) const { return split_feature_inner_[split_idx]; }
/*! \brief Get features on leaf's branch*/
inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; }
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
inline double internal_value(int node_idx) const {
return internal_value_[node_idx];
}
inline bool IsNumericalSplit(int node_idx) const {
return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
inline uint32_t threshold_in_bin(int node_idx) const {
return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node*/
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
virtual inline void Shrinkage(double rate) {
  // internal_value_ has num_leaves_ - 1 entries (one per internal node), so
  // the parallel loop pairs leaf i with internal node i and the last leaf is
  // scaled separately below.
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
  for (int i = 0; i < num_leaves_ - 1; ++i) {
    leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
    internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
    if (is_linear_) {
      // Linear trees also scale the per-leaf linear model (bias + coefficients).
      leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] * rate);
      for (size_t j = 0; j < leaf_coeff_[i].size(); ++j) {
        leaf_coeff_[i][j] = MaybeRoundToZero(leaf_coeff_[i][j] * rate);
      }
    }
  }
  leaf_value_[num_leaves_ - 1] =
      MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
  if (is_linear_) {
    leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] * rate);
    for (size_t j = 0; j < leaf_coeff_[num_leaves_ - 1].size(); ++j) {
      leaf_coeff_[num_leaves_ - 1][j] = MaybeRoundToZero(leaf_coeff_[num_leaves_ - 1][j] * rate);
    }
  }
  shrinkage_ *= rate;  // record the cumulative shrinkage applied to this tree
}
inline double shrinkage() const { return shrinkage_; }
/*! \brief Add a constant offset `val` to every leaf (and internal) output. */
virtual inline void AddBias(double val) {
  // Same indexing pattern as Shrinkage: internal_value_ has num_leaves_ - 1
  // entries, so the last leaf is handled outside the parallel loop.
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
  for (int i = 0; i < num_leaves_ - 1; ++i) {
    leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
    internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
  }
  leaf_value_[num_leaves_ - 1] =
      MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
  if (is_linear_) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_ - 1; ++i) {
      leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] + val);
    }
    leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] + val);
  }
  // force to 1.0
  // Outputs now carry an absolute offset, so the recorded cumulative
  // shrinkage no longer describes them; reset it.
  shrinkage_ = 1.0f;
}
/*! \brief Collapse this tree to a single leaf that always outputs `val`. */
inline void AsConstantTree(double val) {
  num_leaves_ = 1;
  shrinkage_ = 1.0f;
  leaf_value_[0] = val;
  if (is_linear_) {
    leaf_const_[0] = val;  // constant term only; coefficients are unused
  }
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize linear model of tree node to json*/
std::string LinearModelToJSON(int index) const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
/*! \brief True iff fval lies within [-kZeroThreshold, kZeroThreshold],
 * i.e. is treated as numerically zero. */
inline static bool IsZero(double fval) {
  return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
/*! \brief Snap numerically-negligible values to exactly 0; return fval
 * unchanged otherwise. */
inline static double MaybeRoundToZero(double fval) {
  return IsZero(fval) ? 0 : fval;
}
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
/*! \brief Set (input=true) or clear (input=false) the bit(s) selected by
 * mask in *decision_type. (127 - mask) reproduces the original clearing
 * convention for single-bit masks within the low 7 bits. */
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
  int8_t bits = *decision_type;
  bits = input ? static_cast<int8_t>(bits | mask)
               : static_cast<int8_t>(bits & (127 - mask));
  *decision_type = bits;
}
/*! \brief Extract the 2-bit missing-value type stored in bits 2-3 of
 * decision_type. */
inline static int8_t GetMissingType(int8_t decision_type) {
  const int shifted = decision_type >> 2;
  return static_cast<int8_t>(shifted & 3);
}
/*! \brief Store `input` (0-3) into bits 2-3 of *decision_type, keeping the
 * low two flag bits intact (higher bits are cleared, as before). */
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
  const int8_t low_bits = static_cast<int8_t>(*decision_type & 3);
  *decision_type = static_cast<int8_t>(low_bits | (input << 2));
}
/*! \brief Recompute max_depth_ from the current tree structure (e.g. after reloading a model) */
void RecomputeMaxDepth();
/*! \brief Index the next created leaf would receive; equals the current number of leaves */
int NextLeafId() const { return num_leaves_; }
/*! \brief Get the linear model constant term (bias) of one leaf */
inline double LeafConst(int leaf) const { return leaf_const_[leaf]; }
/*! \brief Get the linear model coefficients of one leaf (note: returns a copy of the vector) */
inline std::vector<double> LeafCoeffs(int leaf) const { return leaf_coeff_[leaf]; }
/*! \brief Get the linear model features of one leaf, indexed relative to the used (inner) feature set */
inline std::vector<int> LeafFeaturesInner(int leaf) const {return leaf_features_inner_[leaf]; }
/*! \brief Get the linear model features of one leaf, using original feature indices */
inline std::vector<int> LeafFeatures(int leaf) const {return leaf_features_[leaf]; }
/*! \brief Set the linear model coefficients on one leaf; tiny values are snapped to zero via MaybeRoundToZero */
inline void SetLeafCoeffs(int leaf, const std::vector<double>& output) {
leaf_coeff_[leaf].resize(output.size());
for (size_t i = 0; i < output.size(); ++i) {
leaf_coeff_[leaf][i] = MaybeRoundToZero(output[i]);
}
}
/*! \brief Set the linear model constant term (bias) on one leaf (rounded to zero if negligible) */
inline void SetLeafConst(int leaf, double output) {
leaf_const_[leaf] = MaybeRoundToZero(output);
}
/*! \brief Set the linear model features on one leaf (inner feature indices) */
inline void SetLeafFeaturesInner(int leaf, const std::vector<int>& features) {
leaf_features_inner_[leaf] = features;
}
/*! \brief Set the linear model features on one leaf (original feature indices) */
inline void SetLeafFeatures(int leaf, const std::vector<int>& features) {
leaf_features_[leaf] = features;
}
/*! \brief Whether this tree carries a linear model on each leaf */
inline bool is_linear() const { return is_linear_; }
#ifdef USE_CUDA_EXP
/*! \brief Whether this tree is actually a CUDATree */
inline bool is_cuda_tree() const { return is_cuda_tree_; }
#endif // USE_CUDA_EXP
/*! \brief Mark whether this tree uses linear models on its leaves */
inline void SetIsLinear(bool is_linear) {
is_linear_ = is_linear;
}
protected:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
/*!
* \brief Route a raw feature value through the numerical split at `node`.
* NaN input is coerced to 0 unless this split's missing type is NaN;
* a "missing" value (zero or NaN, per the node's missing type) follows the
* direction recorded via kDefaultLeftMask; otherwise fval <= threshold goes
* left. Returned child ids that are negative encode leaves as ~leaf_index.
*/
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval) && missing_type != MissingType::NaN) {
fval = 0.0f;
}
if ((missing_type == MissingType::Zero && IsZero(fval))
|| (missing_type == MissingType::NaN && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/*!
* \brief Same as NumericalDecision but on pre-binned feature values.
* `default_bin` plays the role of zero and `max_bin` the role of NaN when
* deciding whether the value is "missing" for this node's missing type.
*/
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == MissingType::Zero && fval == default_bin)
|| (missing_type == MissingType::NaN && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/*!
* \brief Route a raw feature value through the categorical split at `node`.
* NaN and negative values always go right; otherwise the value is truncated
* to int and sent left iff it is a member of the node's category bitset
* (the slice of cat_threshold_ delimited by cat_boundaries_).
*/
inline int CategoricalDecision(double fval, int node) const {
int int_fval;
if (std::isnan(fval)) {
return right_child_[node];
} else {
int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
}
}
// threshold_ stores the index of this node's bitset, not a numeric cut
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
/*!
* \brief Categorical split on a pre-binned value: left iff the bin is a
* member of the node's inner category bitset.
*/
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
/*! \brief Dispatch a raw feature value to the categorical or numerical split rule of `node`. */
inline int Decision(double fval, int node) const {
  const bool is_categorical = GetDecisionType(decision_type_[node], kCategoricalMask);
  return is_categorical ? CategoricalDecision(fval, node) : NumericalDecision(fval, node);
}
/*! \brief Dispatch a binned feature value to the categorical or numerical split rule of `node`. */
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
  if (!GetDecisionType(decision_type_[node], kCategoricalMask)) {
    return NumericalDecisionInner(fval, node, default_bin, max_bin);
  }
  return CategoricalDecisionInner(fval, node);
}
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
// index of the feature that split this path step (-1 for the root)
int feature_index;
// fraction of subsets flowing down this branch when the feature is "absent"
double zero_fraction;
// 1 or 0: whether the evaluated sample itself follows this branch
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values,
std::unordered_map<int, double>* phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief weight of leaves */
std::vector<double> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
/*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
bool track_branch_features_;
/*! \brief Features on leaf's branch, original index */
std::vector<std::vector<int>> branch_features_;
double shrinkage_;
int max_depth_;
/*! \brief Tree has linear model at each leaf */
bool is_linear_;
/*! \brief coefficients of linear models on leaves */
std::vector<std::vector<double>> leaf_coeff_;
/*! \brief constant term (bias) of linear models on leaves */
std::vector<double> leaf_const_;
/* \brief features used in leaf linear models; indexing is relative to num_total_features_ */
std::vector<std::vector<int>> leaf_features_;
/* \brief features used in leaf linear models; indexing is relative to used_features_ */
std::vector<std::vector<int>> leaf_features_inner_;
#ifdef USE_CUDA_EXP
/*! \brief Marks whether this tree is a CUDATree */
bool is_cuda_tree_;
#endif // USE_CUDA_EXP
};
/*!
* \brief Turn leaf `leaf` into an internal node whose children are the old
* leaf (kept on the left) and a brand-new leaf (on the right).
* Encoding: child ids >= 0 are internal nodes; negative ids are leaves
* stored as ~leaf_index. The new internal node takes index num_leaves_ - 1
* and the new leaf takes index num_leaves_ — presumably the caller
* increments num_leaves_ right after this returns (TODO confirm at the
* call site, which is outside this chunk).
*/
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = gain;
// add two new leaves (~x is the negative leaf encoding)
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_weight_[new_node_idx] = leaf_weight_[leaf];
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
// NaN outputs are clamped to 0 so predictions never become NaN
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_weight_[leaf] = left_weight;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_weight_[num_leaves_] = right_weight;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth (both children sit one level below the split node)
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
if (track_branch_features_) {
// record the original split feature on both children's root-to-leaf feature lists
branch_features_[num_leaves_] = branch_features_[leaf];
branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
branch_features_[leaf].push_back(split_feature_[new_node_idx]);
}
}
/*!
* \brief Predict the raw score for one sample given as a dense feature array.
* For linear trees the leaf's linear model (bias + sum coeff*feature) is
* evaluated; if any model feature is NaN the plain leaf output is used
* instead. Non-linear trees simply return the reached leaf's output.
*/
inline double Tree::Predict(const double* feature_values) const {
if (is_linear_) {
int leaf = (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
double output = leaf_const_[leaf];
bool nan_found = false;
for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
int feat_raw = leaf_features_[leaf][i];
double feat_val = feature_values[feat_raw];
if (std::isnan(feat_val)) {
// fall back to the constant leaf output below
nan_found = true;
break;
} else {
output += leaf_coeff_[leaf][i] * feat_val;
}
}
if (nan_found) {
return LeafOutput(leaf);
} else {
return output;
}
} else {
if (num_leaves_ > 1) {
int leaf = GetLeaf(feature_values);
return LeafOutput(leaf);
} else {
// stump: single leaf, constant prediction
return leaf_value_[0];
}
}
}
/*!
* \brief Same as Predict but for sparse input given as a feature->value map.
* Features absent from the map simply do not contribute to the linear
* model (unlike NaN, which triggers the plain leaf-output fallback).
*/
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (is_linear_) {
int leaf = (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
double output = leaf_const_[leaf];
bool nan_found = false;
for (size_t i = 0; i < leaf_features_[leaf].size(); ++i) {
int feat = leaf_features_[leaf][i];
auto val_it = feature_values.find(feat);
if (val_it != feature_values.end()) {
double feat_val = val_it->second;
if (std::isnan(feat_val)) {
nan_found = true;
break;
} else {
output += leaf_coeff_[leaf][i] * feat_val;
}
}
}
if (nan_found) {
return LeafOutput(leaf);
} else {
return output;
}
} else {
if (num_leaves_ > 1) {
int leaf = GetLeafByMap(feature_values);
return LeafOutput(leaf);
} else {
return leaf_value_[0];
}
}
}
/*! \brief Index of the leaf this sample falls into (a single-leaf tree always maps to 0). */
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
/*! \brief Leaf index for a sparse sample given as a feature->value map. */
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
}
/*!
* \brief Accumulate SHAP feature contributions for one sample into `output`
* (slot num_features holds the expected value / bias term).
* Scratch space holds one PathElement per node on a root-to-leaf path,
* triangularly: max_path_len*(max_path_len+1)/2 entries.
*/
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
/*!
* \brief Map-based variant of PredictContrib: accumulates SHAP contributions
* into the output map, with key num_features holding the expected value.
*/
inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output) {
(*output)[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
/*!
* \brief Depth-first walk that refills leaf_depth_ after a model reload.
* Negative node ids are leaves (~node is the leaf index); the vector is
* resized once at the root call (node == 0).
*/
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());
if (node < 0) {
leaf_depth_[~node] = depth;
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
/*!
* \brief Walk from the root to a leaf for a dense feature array and return
* the leaf index. The num_cat_ check hoists the categorical/numerical
* dispatch out of the hot loop: trees without categorical splits use the
* cheaper NumericalDecision directly.
*/
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
node = Decision(feature_values[split_feature_[node]], node);
}
} else {
while (node >= 0) {
node = NumericalDecision(feature_values[split_feature_[node]], node);
}
}
// negative ids encode leaves as ~leaf_index
return ~node;
}
/*!
* \brief Walk from the root to a leaf for a sparse sample given as a
* feature->value map and return the leaf index. Features absent from the
* map are treated as 0.0. Uses a single find() per visited node instead
* of the original count()-then-at() pair, halving the hash lookups.
*/
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      const double fval = (it != feature_values.end()) ? it->second : 0.0f;
      node = Decision(fval, node);
    }
  } else {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      const double fval = (it != feature_values.end()) ? it->second : 0.0f;
      node = NumericalDecision(fval, node);
    }
  }
  // negative ids encode leaves as ~leaf_index
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
s_nk.c | #include <stdlib.h>
#include "s_nk.h"
#include "threads.h"
/*
 * Allocate an (n - k + 1) * (k + 1) table of zero-initialized GMP integers.
 * NOTE(review): the size expression underflows for k > n + 1 since the
 * operands are size_t — confirm callers guarantee k <= n.
 * Aborts on allocation failure: previously a NULL malloc result was passed
 * straight to mpz_init, which is undefined behavior.
 */
st_nk_t alloc_st_n_k(size_t n, size_t k) {
    st_nk_t st = { NULL, n, k, (n - k + 1)*(k + 1) };
    st.data = (mpz_t *)malloc(sizeof(mpz_t) * st.size);
    if (st.data == NULL) {
        /* fail loudly rather than crash inside mpz_init */
        abort();
    }
    #pragma omp parallel for
    for(size_t i = 0; i < st.size; ++i) {
        mpz_init(st.data[i]);
    }
    return st;
}
/*
 * Clear every GMP integer in the table and release the backing array.
 * Safe on a NULL table or an already-freed one; the data pointer is reset
 * to NULL afterwards so an accidental double call cannot double-free.
 */
void free_stirling_nk(st_nk_t *st) {
    if (st == NULL || st->data == NULL) {
        return;
    }
    #pragma omp parallel for
    for(size_t i = 0; i < st->size; ++i) {
        mpz_clear(st->data[i]);
    }
    free(st->data);
    st->data = NULL;
}
|
Example_doacross.5.c | /*
* @@name: doacross.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.5
*/
float foo(int i);
float bar(float a, float b);
float baz(float b);
/*
 * OpenMP 4.5 doacross example: B[i] depends on B[i-1], so the ordered(1)
 * loop serializes only that dependence while A[i] and C[i] remain free to
 * run concurrently. Note this is an orphaned worksharing loop: it must be
 * invoked from inside a parallel region to actually use multiple threads.
 */
void work( int N, float *A, float *B, float *C )
{
int i;
#pragma omp for ordered(1)
for (i=1; i<N; i++)
{
A[i] = foo(i);
/* wait until iteration i-1 has passed its depend(source) point */
#pragma omp ordered depend(sink: i-1)
B[i] = bar(A[i], B[i-1]);
/* signal completion so iteration i+1 may proceed */
#pragma omp ordered depend(source)
C[i] = baz(B[i]);
}
}
|
mm_nested.c | /*
* Assignment2 (CSE436)
* Kazumi Malhan
* 06/08/2016
*/
/* Note
*
* This program assumes that size of matrix is dividable
* by number of tasks
*/
/* Ongoing issues !! */
// Need to put init code back
// Need to remove all debug printf
// Current code assumes that N and M are dividable by num_tasks
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#include <omp.h>
/* Current wall-clock time in seconds (ftime-based, millisecond resolution). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return now.time + now.millitm / 1000.0;
}
/* Current wall-clock time in milliseconds (ftime-based). */
double read_timer_ms() {
    struct timeb now;
    ftime(&now);
    return 1000.0 * now.time + (double) now.millitm;
}
#define REAL float
#define VECTOR_LENGTH 512
/* Fill A[0..N-1] with deterministic values (A[i] = i). */
void init(REAL A[], int N) {
    int idx;
    for (idx = 0; idx < N; ++idx) {
        /* deterministic fill (a drand48() variant was commented out upstream) */
        A[idx] = idx;
    }
}
/* Function Prototypes */
void mm(int N, int K, int M, REAL * A, REAL * B, REAL * C);
void mm_parallel_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
void mm_parallel_for_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks);
/**
 * To compile: gcc mm.c -fopenmp -o mm
 *
 * Driver: parses optional [N K M num_tasks] arguments (defaults 512/512/512
 * and 4 tasks when fewer than four are given), runs the serial multiply and
 * each parallel variant once, then prints runtime and MFLOPS for each.
 */
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
int M = N;
int K = N;
int num_tasks = 4;
double elapsed; /* for timing */
if (argc < 5) {
fprintf(stderr, "Usage: mm [<N(%d)>] <K(%d) [<M(%d)>] [<#tasks(%d)>]\n", N,K,M,num_tasks);
fprintf(stderr, "\t Example: ./mm %d %d %d %d\n", N,K,M,num_tasks);
} else {
N = atoi(argv[1]);
K = atoi(argv[2]);
M = atoi(argv[3]);
num_tasks = atoi(argv[4]);
}
printf("\tC[%d][%d] = A[%d][%d] * B[%d][%d] with %d tasks\n", N, M, N, K, K, M, num_tasks);
/* NOTE(review): malloc results are unchecked; a failed allocation crashes in init() */
REAL * A = malloc(sizeof(REAL)*N*K);
REAL * B = malloc(sizeof(REAL)*K*M);
REAL * C = malloc(sizeof(REAL)*N*M);
srand48((1 << 12));
init(A, N*K);
init(B, K*M);
/* Serial program */
double elapsed_mm = read_timer();
mm(N, K, M, A, B, C);
elapsed_mm = (read_timer() - elapsed_mm);
/* Parallel program */
double elapsed_mm_parallel_row = read_timer();
mm_parallel_row(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_row = (read_timer() - elapsed_mm_parallel_row);
double elapsed_mm_parallel_col = read_timer();
mm_parallel_col(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_col = (read_timer() - elapsed_mm_parallel_col);
double elapsed_mm_parallel_rowcol = read_timer();
mm_parallel_rowcol(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_rowcol = (read_timer() - elapsed_mm_parallel_rowcol);
/* Parallel for program */
double elapsed_mm_parallel_for_row = read_timer();
mm_parallel_for_row(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_for_row = (read_timer() - elapsed_mm_parallel_for_row);
double elapsed_mm_parallel_for_col = read_timer();
mm_parallel_for_col(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_for_col = (read_timer() - elapsed_mm_parallel_for_col);
double elapsed_mm_parallel_for_rowcol = read_timer();
mm_parallel_for_rowcol(N, K, M, A, B, C, num_tasks);
elapsed_mm_parallel_for_rowcol = (read_timer() - elapsed_mm_parallel_for_rowcol);
/* you should add the call to each function and time the execution */
/* NOTE(review): every variant overwrites C in place; no result is validated against the serial output */
/* NOTE(review): M*N*K is computed in int arithmetic and can overflow for large sizes before the division */
printf("======================================================================================================\n");
printf("\tC[%d][%d] = A[%d][%d] * B[%d][%d] with %d tasks\n", N, M, N, K, K, M, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\t\t\tRuntime (ms)\t MFLOPS \n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("mm:\t\t\t\t%4f\t%4f\n", elapsed_mm * 1.0e3, M*N*K / (1.0e6 * elapsed_mm));
printf("mm_parallel_row:\t\t%4f\t%4f\n", elapsed_mm_parallel_row * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_row));
printf("mm_parallel_col:\t\t%4f\t%4f\n", elapsed_mm_parallel_col * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_col));
printf("mm_parallel_rowcol:\t\t%4f\t%4f\n", elapsed_mm_parallel_rowcol * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_rowcol));
printf("mm_parallel_for_row:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_row * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_for_row));
printf("mm_parallel_for_col:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_col * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_for_col));
printf("mm_parallel_for_rowcol:\t\t%4f\t%4f\n", elapsed_mm_parallel_for_rowcol * 1.0e3, M*N*K / (1.0e6 * elapsed_mm_parallel_for_rowcol));
free(A);
free(B);
free(C);
return 0;
}
/* Serial reference: C[N][M] = A[N][K] * B[K][M], all row-major. */
void mm(int N, int K, int M, REAL * A, REAL * B, REAL * C) {
    int row, col, kk;
    for (row = 0; row < N; row++) {
        for (col = 0; col < M; col++) {
            REAL acc = 0.0;
            for (kk = 0; kk < K; kk++) {
                acc += A[row*K + kk] * B[kk*M + col];
            }
            C[row*M + col] = acc;
        }
    }
}
/* Parallel Row: each of num_tasks threads computes one contiguous band of rows.
 * The last thread absorbs the remainder, so N no longer has to be divisible
 * by num_tasks (the original silently skipped the trailing N % num_tasks rows). */
void mm_parallel_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        int tid = omp_get_thread_num();
        int chunk = N / num_tasks;
        int istart = tid * chunk;
        /* last thread picks up the leftover rows when N % num_tasks != 0 */
        int iend = (tid == num_tasks - 1) ? N : istart + chunk;
        for (i = istart; i < iend; i++) { /* decompose this loop */
            for (j = 0; j < M; j++) {
                REAL temp = 0.0;
                for (w = 0; w < K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}
/* Parallel Column: each of num_tasks threads computes one contiguous band of
 * columns. The last thread absorbs the remainder, so M no longer has to be
 * divisible by num_tasks (the original skipped the trailing M % num_tasks columns). */
void mm_parallel_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int i, j, w;
    omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
    {
        int tid = omp_get_thread_num();
        int chunk = M / num_tasks;
        int jstart = tid * chunk;
        /* last thread picks up the leftover columns when M % num_tasks != 0 */
        int jend = (tid == num_tasks - 1) ? M : jstart + chunk;
        for (i = 0; i < N; i++) {
            for (j = jstart; j < jend; j++) { /* decompose this loop */
                REAL temp = 0.0;
                for (w = 0; w < K; w++)
                    temp += A[i*K+w]*B[w*M+j];
                C[i*M+j] = temp;
            }
        }
    } /* end of parallel */
}
/* Parallel Row Column: a task_r x task_c 2-D decomposition using nested
 * parallel regions — outer threads own row bands, inner threads own column
 * bands. Fixes three defects in the original:
 *   1. istart was (tid1/task_c)*(N/task_r) and jstart was
 *      (tid2/task_r)*(M/task_c), which collapsed every thread onto band 0
 *      (duplicated work, remaining bands never computed) whenever both
 *      factors exceeded 1;
 *   2. the inner region re-used the outer loop variable i as a SHARED
 *      variable, so inner threads raced on it;
 *   3. a redundant outer i loop wrapped the inner region.
 * Still assumes N % task_r == 0 and M % task_c == 0, as the file header notes. */
void mm_parallel_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
    int task_r, task_c;
    /* Calculate amount of work for each thread */
    if (num_tasks == 1){
        task_r = 1;
        task_c = 1;
    } else {
        task_r = num_tasks / 2;
        task_c = num_tasks / task_r;
    }
    omp_set_nested(1);
#pragma omp parallel shared (N, K, M, A, B, C, task_r, task_c) num_threads(task_r)
    {
        int tid1 = omp_get_thread_num();
        int istart = tid1 * (N / task_r);
        int iend = (tid1 + 1) * (N / task_r);
#pragma omp parallel shared (N, M, K, A, B, C, task_r, task_c, istart, iend) num_threads(task_c)
        {
            int i, j, w;   /* private to each inner thread */
            int tid2 = omp_get_thread_num();
            int jstart = tid2 * (M / task_c);
            int jend = (tid2 + 1) * (M / task_c);
            for (i = istart; i < iend; i++) {
                for (j = jstart; j < jend; j++) {
                    REAL temp = 0.0;
                    for (w = 0; w < K; w++) {
                        temp += A[i*K+w]*B[w*M+j];
                    }
                    C[i*M+j] = temp;
                }
            }
        }
    } /* end of parallel */
}
/* Parallel For Row: lets the OpenMP runtime split the row loop with a static
 * schedule; nowait skips the end-of-loop barrier (safe: nothing follows in
 * the region and iterations are independent). Handles any N, unlike the
 * manual decompositions above. */
void mm_parallel_for_row(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
int i, j, w;
omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
{
#pragma omp for schedule(static) nowait
for (i=0; i<N; i++) {
for (j=0; j<M; j++) {
REAL temp = 0.0;
for (w=0; w<K; w++)
temp += A[i*K+w]*B[w*M+j];
C[i*M+j] = temp;
}
}
} /* end of parallel */
}
/* Parallel For Column: the worksharing construct sits INSIDE the replicated
 * i loop, so for each row the columns are split among threads. Every thread
 * encounters the construct once per i (as OpenMP requires); schedule(static)
 * with nowait is safe because a thread always receives the same j chunk and
 * iterations are independent, but this pays N worksharing entries vs one. */
void mm_parallel_for_col(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
int i, j, w;
omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
{
for (i=0; i<N; i++) {
#pragma omp for schedule(static) nowait
for (j=0; j<M; j++) {
REAL temp = 0.0;
for (w=0; w<K; w++)
temp += A[i*K+w]*B[w*M+j];
C[i*M+j] = temp;
}
}
} /* end of parallel */
}
/* Parallel For Row Column: collapse(2) fuses the i and j loops into one
 * N*M iteration space before splitting it statically, giving a finer-grained
 * 2-D distribution than either single-loop variant. */
void mm_parallel_for_rowcol(int N, int K, int M, REAL * A, REAL * B, REAL * C, int num_tasks){
int i, j, w;
omp_set_num_threads(num_tasks);
#pragma omp parallel shared (N, K, M, A, B, C, num_tasks) private (i, j, w)
{
#pragma omp for collapse(2) schedule(static) nowait
for (i=0; i<N; i++) {
for (j=0; j<M; j++) {
REAL temp = 0.0;
for (w=0; w<K; w++)
temp += A[i*K+w]*B[w*M+j];
C[i*M+j] = temp;
}
}
} /* end of parallel */
}
|
GB_unaryop__minv_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int8
// op(A') function: GB_tran__minv_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p]) for p in [0, anz): an
// elementwise apply via the GB_CAST_OP macro, parallelized with a static
// OpenMP schedule. Returns GrB_NO_VALUE when this operator/type pair is
// compiled out via GB_DISABLE. (Auto-generated file: do not hand-edit.)
GrB_Info GB_unop__minv_uint8_int8
(
uint8_t *Cx, // Cx and Ax may be aliased
int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The kernel body is textually included from GB_unaryop_transpose.c (phase 2
// of 2); the GB_* macros defined above specialize it for this operator/type
// pair. (Auto-generated file: do not hand-edit.)
GrB_Info GB_tran__minv_uint8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000
// A complex number: r holds the real part, i the imaginary part.
typedef struct {
double r;
double i;
}complex_t;
// Return the number of iterations of z <- z^2 + c before z escapes the
// |z| > 2 disk for the given c; MXITER means the point never escaped
// within the budget (treated as inside the Mandelbrot set).
int testpoint(complex_t c){
  complex_t z = c;
  int iter = 0;
  while (iter < MXITER) {
    double next_r = (z.r * z.r) - (z.i * z.i) + c.r;
    z.i = 2. * z.r * z.i + c.i;
    z.r = next_r;
    if ((z.r * z.r + z.i * z.i) > 4.0) {
      return iter;
    }
    ++iter;
  }
  return iter;
}
// Perform the Mandelbrot iteration on an Nre x Nim grid of complex points
// spanning [cmin, cmax] and record the escape-iteration count of each point
// in `count` (row n holds the points with imaginary part cmin.i + di*n).
// Assumes Nre > 1 and Nim > 1 so the grid spacings are well defined.
// (Also drops a stray empty statement `;;` from the original.)
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n, m;
  complex_t c;
  double dr = (cmax.r - cmin.r) / (Nre - 1);
  double di = (cmax.i - cmin.i) / (Nim - 1);
  // Q2c: split the outer loop over rows among the OpenMP threads;
  // c, m, n are private so each thread works on its own point.
  #pragma omp parallel for private(c,m,n)
  for (n = 0; n < Nim; ++n) {
    for (m = 0; m < Nre; ++m) {
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m + n*Nre] = testpoint(c);
    }
  }
}
// usage: ./mandelbrot <Nre> <Nim> <Nthreads>
// e.g. ./mandelbrot 4096 4096 1 renders a 4096 x 4096 pixel image.
// Hardened vs. the original: missing arguments, a failed allocation, or a
// failed fopen now produce an error message instead of a crash, and the
// count buffer is freed before exit.
int main(int argc, char **argv){
  if (argc < 4) {
    fprintf(stderr, "usage: %s <Nre> <Nim> <Nthreads>\n", argv[0]);
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);
  // Q2b: set the number of OpenMP threads to be Nthreads here:
  omp_set_num_threads(Nthreads);
  // storage for the iteration counts (size_t cast avoids int overflow for huge grids)
  float *count = (float*) malloc((size_t)Nre * Nim * sizeof(float));
  if (count == NULL) {
    fprintf(stderr, "failed to allocate %d x %d count array\n", Nre, Nim);
    return 1;
  }
  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm = .125547;
  const float diam = 0.151579;
  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  // Q2d: read time before calling mandelbrot using the OpenMP wall-clock timer
  double start = omp_get_wtime();
  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);
  // Q2d: read time after calling mandelbrot
  double end = omp_get_wtime();
  // print elapsed time
  printf("elapsed = %g\n", end - start);
  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  if (fp == NULL) {
    fprintf(stderr, "failed to open mandelbrot.png for writing\n");
    free(count);
    return 1;
  }
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  // NOTE(review): fp is intentionally not fclose'd here — confirm whether
  // write_hot_png closes/flushes the stream (the original relied on exit()).
  free(count);
  return 0;
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at the mozilla.org home page
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal
  * Get/set the process-wide max-thread count backing nbThreads()/setNbThreads().
  * The value lives in a function-local static; -1 (or any non-positive value)
  * means "unset", in which case GetAction reports omp_get_max_threads() when
  * OpenMP is available and 1 otherwise.
  * NOTE(review): the static is written without synchronization — presumably
  * setNbThreads is expected to be called before going multi-threaded; confirm. */
inline void manage_multi_threading(Action action, int* v)
{
static int m_maxThreads = -1;
EIGEN_UNUSED_VARIABLE(m_maxThreads);
if(action==SetAction)
{
eigen_internal_assert(v!=0);
m_maxThreads = *v;
}
else if(action==GetAction)
{
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
}
else
{
// unknown Action value: programming error
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads.
  * Performs a throwaway read of the thread count and cache sizes so their
  * lazily-initialized internal state is set up before concurrent use. */
inline void initParallel()
{
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the maximum number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int thread_count;
  internal::manage_multi_threading(GetAction, &thread_count);
  return thread_count;
}
/** Sets the max number of threads reserved for Eigen
  * (a non-positive value resets to the OpenMP default, see manage_multi_threading)
  * \sa nbThreads */
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
// Per-thread bookkeeping shared between GEMM worker threads:
// `sync` and `users` are volatile handshake fields (sync starts at -1),
// while lhs_start/lhs_length record the row range assigned to the thread
// (see parallelize_gemm below, which fills them with r0/actualBlockRows).
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
Index volatile sync;
int volatile users;
Index lhs_start;
Index lhs_length;
};
// Run `func` over the rows x cols product space, either sequentially or
// split across OpenMP threads. Threading is skipped when disabled at compile
// time, when the heuristics cap the useful thread count at 1, or when we are
// already inside a parallel region.
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redisigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
// one GemmParallelInfo slot per worker, allocated on the stack when small
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
// column blocks are rounded down to a multiple of 4, row blocks to a
// multiple of the kernel's mr; the last thread absorbs the remainders
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose)
func(c0, actualBlockCols, 0, rows, info);
else
func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
npdot.c | #include <stdlib.h>
#include <string.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/*
 * numpy.dot may call unoptimized blas
 *
 * NPdgemm: C = alpha * op(A) * op(B) + beta * C via BLAS dgemm_,
 * parallelized with OpenMP.  The work is split along whichever dimension
 * dominates: the contraction dimension k (each thread accumulates a private
 * m x n buffer that is merged under a critical section), the row dimension
 * m, or the column dimension n.  Matrices are column-major with leading
 * dimensions lda/ldb/ldc; offseta/offsetb/offsetc shift the start of each
 * buffer before the multiplication.
 */
void NPdgemm(const char trans_a, const char trans_b,
             const int m, const int n, const int k,
             const int lda, const int ldb, const int ldc,
             const int offseta, const int offsetb, const int offsetc,
             double *a, double *b, double *c,
             const double alpha, const double beta)
{
        const size_t dimc = ldc;
        int i, j;
        if (m == 0 || n == 0) {
                return;
        } else if (k == 0) {
                /* op(A)*op(B) is an empty sum: C becomes all zeros. */
                for (i = 0; i < n; i++) {
                for (j = 0; j < m; j++) {
                        c[i*dimc+j] = 0;
                } }
                return;
        }
        a += offseta;
        b += offsetb;
        c += offsetc;

        if ((k/m) > 3 && (k/n) > 3) { // parallelize k
                /* Apply beta to C up front; each thread then computes a
                 * partial product over its k-slice with beta = 0 and the
                 * partial results are summed into C under a critical
                 * section. */
                if (beta == 0) {
                        for (i = 0; i < n; i++) {
                        for (j = 0; j < m; j++) {
                                c[i*dimc+j] = 0;
                        } }
                } else {
                        for (i = 0; i < n; i++) {
                        for (j = 0; j < m; j++) {
                                c[i*dimc+j] *= beta;
                        } }
                }
#pragma omp parallel default(none) shared(a, b, c) \
        private(i, j)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((k+nthread-1) / nthread, 1);
                double D0 = 0;
                /* Cast to size_t before multiplying so m*n cannot overflow
                 * int for large matrices.
                 * NOTE(review): an allocation failure is not handled here;
                 * dgemm_ would fault on a NULL cpriv. */
                double *cpriv = malloc(sizeof(double) * (size_t)m * n);
                /* di must start at 0: a thread that receives no iteration
                 * of the omp-for below would otherwise read it
                 * uninitialized in the merge step (undefined behavior). */
                int di = 0;
                size_t ij;
                size_t astride = nblk;
                size_t bstride = nblk;
                if (trans_a == 'N') {
                        astride *= lda;
                }
                if (trans_b != 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, k-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &m, &n, &di,
                                       &alpha, a+astride*i, &lda,
                                       b+bstride*i, &ldb,
                                       &D0, cpriv, &m);
                        }
                }
                /* Merge this thread's partial product into C (skipped when
                 * the thread did no work, i.e. di == 0). */
#pragma omp critical
                if (di > 0) {
                        for (ij = 0, i = 0; i < n; i++) {
                        for (j = 0; j < m; j++, ij++) {
                                c[i*dimc+j] += cpriv[ij];
                        } }
                }
                free(cpriv);
}
        } else if (m > n+4) { // parallelize m
#pragma omp parallel default(none) shared(a, b, c)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((m+nthread-1) / nthread, 1);
                nthread = (m+nblk-1) / nblk;
                int di;
                /* Stride (in elements of A) between consecutive row
                 * blocks: rows are contiguous when trans_a == 'N',
                 * otherwise each row block starts nblk columns further. */
                size_t astride = nblk;
                if (trans_a != 'N') {
                        astride *= lda;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, m-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &di, &n, &k,
                                       &alpha, a+astride*i, &lda, b, &ldb,
                                       &beta, c+i*nblk, &ldc);
                        }
                }
}
        } else { // parallelize n
#pragma omp parallel default(none) shared(a, b, c)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((n+nthread-1) / nthread, 1);
                nthread = (n+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                size_t cstride = dimc * nblk;
                if (trans_b == 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, n-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &m, &di, &k,
                                       &alpha, a, &lda, b+bstride*i, &ldb,
                                       &beta, c+cstride*i, &ldc);
                        }
                }
}
        }
}
/*
 * NPzgemm: complex double analogue of NPdgemm.
 * C = alpha * op(A) * op(B) + beta * C via BLAS zgemm_, parallelized with
 * OpenMP along k, m, or n depending on which dimension dominates.
 * alpha/beta are passed by pointer (BLAS complex calling convention).
 */
void NPzgemm(const char trans_a, const char trans_b,
             const int m, const int n, const int k,
             const int lda, const int ldb, const int ldc,
             const int offseta, const int offsetb, const int offsetc,
             double complex *a, double complex *b, double complex *c,
             const double complex *alpha, const double complex *beta)
{
        const size_t dimc = ldc;
        int i, j;
        if (m == 0 || n == 0) {
                return;
        } else if (k == 0) {
                /* op(A)*op(B) is an empty sum: C becomes all zeros. */
                for (i = 0; i < n; i++) {
                for (j = 0; j < m; j++) {
                        c[i*dimc+j] = 0;
                } }
                return;
        }
        a += offseta;
        b += offsetb;
        c += offsetc;

        if ((k/m) > 3 && (k/n) > 3) { // parallelize k
                /* Apply beta to C up front; threads accumulate partial
                 * products over their k-slices into private buffers that
                 * are merged under a critical section. */
                if (creal(*beta) == 0 && cimag(*beta) == 0) {
                        for (i = 0; i < n; i++) {
                        for (j = 0; j < m; j++) {
                                c[i*dimc+j] = 0;
                        } }
                } else {
                        for (i = 0; i < n; i++) {
                        for (j = 0; j < m; j++) {
                                c[i*dimc+j] *= beta[0];
                        } }
                }
#pragma omp parallel default(none) shared(a, b, c, alpha) \
        private(i, j)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((k+nthread-1) / nthread, 1);
                double complex Z0 = 0;
                /* Cast to size_t before multiplying so m*n cannot overflow
                 * int for large matrices.
                 * NOTE(review): an allocation failure is not handled here;
                 * zgemm_ would fault on a NULL cpriv. */
                double complex *cpriv = malloc(sizeof(double complex) * (size_t)m * n);
                /* di must start at 0: a thread that receives no iteration
                 * of the omp-for below would otherwise read it
                 * uninitialized in the merge step (undefined behavior). */
                int di = 0;
                size_t ij;
                size_t astride = nblk;
                size_t bstride = nblk;
                if (trans_a == 'N') {
                        astride *= lda;
                }
                if (trans_b != 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, k-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &m, &n, &di,
                                       alpha, a+astride*i, &lda,
                                       b+bstride*i, &ldb,
                                       &Z0, cpriv, &m);
                        }
                }
                /* Merge this thread's partial product into C (skipped when
                 * the thread did no work, i.e. di == 0). */
#pragma omp critical
                if (di > 0) {
                        for (ij = 0, i = 0; i < n; i++) {
                        for (j = 0; j < m; j++, ij++) {
                                c[i*dimc+j] += cpriv[ij];
                        } }
                }
                free(cpriv);
}
        } else if (m > n+4) { // parallelize m
#pragma omp parallel default(none) shared(a, b, c, alpha, beta)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((m+nthread-1) / nthread, 1);
                nthread = (m+nblk-1) / nblk;
                int di;
                /* Stride (in elements of A) between consecutive row
                 * blocks: rows are contiguous when trans_a == 'N',
                 * otherwise each row block starts nblk columns further. */
                size_t astride = nblk;
                if (trans_a != 'N') {
                        astride *= lda;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, m-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &di, &n, &k,
                                       alpha, a+astride*i, &lda, b, &ldb,
                                       beta, c+i*nblk, &ldc);
                        }
                }
}
        } else { // parallelize n
#pragma omp parallel default(none) shared(a, b, c, alpha, beta)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((n+nthread-1) / nthread, 1);
                nthread = (n+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                size_t cstride = dimc * nblk;
                if (trans_b == 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, n-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &m, &di, &k,
                                       alpha, a, &lda, b+bstride*i, &ldb,
                                       beta, c+cstride*i, &ldc);
                        }
                }
}
        }
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Create (or replace) the image colormap with at least one entry,
  initialized to an evenly spaced, fully opaque gray ramp, then force the
  image into PseudoClass storage.  Returns MagickFalse when the requested
  size exceeds MaxColormapSize or memory cannot be acquired.
*/
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  PixelInfo
    *colormap;

  ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colors > MaxColormapSize)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  image->colors=MagickMax(colors,1);
  /*
    Resize an existing colormap in place, otherwise allocate a new one.
  */
  if (image->colormap != (PixelInfo *) NULL)
    colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  else
    colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  image->colormap=colormap;
  if (colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Fill the colormap with a linear gray ramp.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      gray;

    GetPixelInfo(image,colormap+i);
    gray=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
    colormap[i].red=gray;
    colormap[i].green=gray;
    colormap[i].blue=gray;
    colormap[i].alpha=(MagickRealType) OpaqueAlpha;
    colormap[i].alpha_trait=BlendPixelTrait;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychodelic effect.
%
% WARNING: this assumes an images colormap is in a well know and defined
% order. Currently Imagemagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Keep the modulo in signed arithmetic: image->colors is size_t, so
        taking `%` against it directly would convert a negative displaced
        index to a huge unsigned value, yield a wrong remainder, and the
        `index < 0` fixup below would never fire.
      */
      index=((ssize_t) GetPixelIndex(image,q)+displace) % (ssize_t)
        image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator that orders PixelInfo entries by decreasing intensity.
  Intensities are compared as doubles: truncating each MagickRealType
  intensity to int (as a plain integer subtraction would require) can
  collapse distinct intensities onto the same integer and the conversion
  itself overflows for HDRI pixel values.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity_1,
    intensity_2;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity_1=GetPixelInfoIntensity((const Image *) NULL,color_1);
  intensity_2=GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity_1 > intensity_2)
    return(-1);
  if (intensity_1 < intensity_2)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    y;

  /* pixels[old_index] == new_index after the sort. */
  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries.  The alpha member is used as
    scratch space to stash each entry's original index through the sort.
    NOTE(review): the stashed index is never cleared afterwards, so the
    colormap's alpha values are left holding these indexes — confirm this
    is intended before relying on colormap alpha after this call.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing color intensity (IntensityCompare).
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order.
  */
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Remap every pixel's index through the old->new permutation. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
symmetry.c | /* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */
/* This file is part of spglib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cell.h"
#include "delaunay.h"
#include "mathfunc.h"
#include "symmetry.h"
#include "debug.h"
#define NUM_ATOMS_CRITERION_FOR_OPENMP 1000
#define ANGLE_REDUCE_RATE 0.95
#define NUM_ATTEMPT 100
#define PI 3.14159265358979323846
/* Tolerance of angle between lattice vectors in degrees */
/* Negative value invokes converter from symprec. */
static int relative_axes[][3] = {
{ 1, 0, 0},
{ 0, 1, 0},
{ 0, 0, 1},
{-1, 0, 0},
{ 0,-1, 0}, /* 5 */
{ 0, 0,-1},
{ 0, 1, 1},
{ 1, 0, 1},
{ 1, 1, 0},
{ 0,-1,-1}, /* 10 */
{-1, 0,-1},
{-1,-1, 0},
{ 0, 1,-1},
{-1, 0, 1},
{ 1,-1, 0}, /* 15 */
{ 0,-1, 1},
{ 1, 0,-1},
{-1, 1, 0},
{ 1, 1, 1},
{-1,-1,-1}, /* 20 */
{-1, 1, 1},
{ 1,-1, 1},
{ 1, 1,-1},
{ 1,-1,-1},
{-1, 1,-1}, /* 25 */
{-1,-1, 1},
};
static int identity[3][3] = {{1, 0, 0},
{0, 1, 0},
{0, 0, 1}};
static int get_index_with_least_atoms(const Cell *cell);
static VecDBL * get_translation(SPGCONST int rot[3][3],
const Cell *cell,
const double symprec,
const int is_identity);
static Symmetry * get_operations(const Cell *primitive,
const double symprec,
const double angle_symprec);
static Symmetry * reduce_operation(const Cell * primitive,
const Symmetry * symmetry,
const double symprec,
const double angle_symprec);
static int search_translation_part(int lat_point_atoms[],
const Cell * cell,
SPGCONST int rot[3][3],
const int min_atom_index,
const double origin[3],
const double symprec,
const int is_identity);
static int is_overlap_all_atoms(const double test_trans[3],
SPGCONST int rot[3][3],
const Cell * cell,
const double symprec,
const int is_identity);
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * point_sym_prim,
SPGCONST double new_lattice[3][3],
SPGCONST double original_lattice[3][3]);
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
const Cell *primitive,
const double symprec);
static void set_axes(int axes[3][3],
const int a1, const int a2, const int a3);
static PointSymmetry get_lattice_symmetry(SPGCONST double cell_lattice[3][3],
const double symprec,
const double angle_symprec);
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec,
const double angle_symprec);
static double get_angle(SPGCONST double metric[3][3],
const int i,
const int j);
/* Allocate a Symmetry struct with room for `size` rotation matrices and */
/* translation vectors.  All partially allocated memory is released on */
/* failure.  Return NULL if failed. */
Symmetry * sym_alloc_symmetry(const int size)
{
  Symmetry *symmetry;

  symmetry = NULL;

  if (size < 1) {
    return NULL;
  }

  if ((symmetry = (Symmetry*) malloc(sizeof(Symmetry))) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    /* Report the location like the sibling failure paths below. */
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    return NULL;
  }

  symmetry->size = size;
  symmetry->rot = NULL;
  symmetry->trans = NULL;

  if ((symmetry->rot =
       (int (*)[3][3]) malloc(sizeof(int[3][3]) * size)) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    free(symmetry);
    symmetry = NULL;
    return NULL;
  }
  if ((symmetry->trans =
       (double (*)[3]) malloc(sizeof(double[3]) * size)) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry);
    symmetry = NULL;
    return NULL;
  }

  return symmetry;
}
/* Release a Symmetry allocated by sym_alloc_symmetry(). */
/* Safe to call with NULL. */
void sym_free_symmetry(Symmetry *symmetry)
{
  if (symmetry == NULL) {
    return;
  }
  if (symmetry->size > 0) {
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry->trans);
    symmetry->trans = NULL;
  }
  free(symmetry);
}
/* Compute the space group operations of the primitive cell. */
/* Return NULL if failed */
Symmetry * sym_get_operation(const Cell * primitive,
                             const double symprec,
                             const double angle_tolerance)
{
  Symmetry *symmetry;

  debug_print("sym_get_operations:\n");

  symmetry = get_operations(primitive, symprec, angle_tolerance);
  return symmetry;
}
/* Filter `symmetry` against the primitive cell (see reduce_operation). */
/* Return NULL if failed */
Symmetry * sym_reduce_operation(const Cell * primitive,
                                const Symmetry * symmetry,
                                const double symprec,
                                const double angle_tolerance)
{
  Symmetry *reduced;

  reduced = reduce_operation(primitive, symmetry, symprec, angle_tolerance);
  return reduced;
}
/* Collect the pure translations of `cell`, i.e. the translation parts */
/* compatible with the identity rotation.  Return NULL if failed. */
VecDBL * sym_get_pure_translation(const Cell *cell,
                                  const double symprec)
{
  int multi;
  VecDBL * pure_trans;

  debug_print("sym_get_pure_translation (tolerance = %f):\n", symprec);

  if ((pure_trans = get_translation(identity, cell, symprec, 1)) == NULL) {
    warning_print("spglib: get_translation failed (line %d, %s).\n",
                  __LINE__, __FILE__);
    return NULL;
  }

  /* Sanity check: the number of pure translations is expected to divide */
  /* the number of atoms; otherwise the tolerance is likely inconsistent */
  /* with the input cell, so warn but still return the result. */
  multi = pure_trans->size;
  if ((cell->size / multi) * multi == cell->size) {
    debug_print(" sym_get_pure_translation: pure_trans->size = %d\n", multi);
  } else {
    warning_print("spglib: Finding pure translation failed (line %d, %s).\n", __LINE__, __FILE__);
    warning_print(" cell->size %d, multi %d\n", cell->size, multi);
  }

  return pure_trans;
}
/* Reduce `pure_trans` to the translations that remain valid for `cell` */
/* under the given tolerances.  Return NULL if failed. */
VecDBL * sym_reduce_pure_translation(const Cell * cell,
                                     const VecDBL * pure_trans,
                                     const double symprec,
                                     const double angle_tolerance)
{
  int i, multi;
  Symmetry *symmetry, *symmetry_reduced;
  VecDBL * pure_trans_reduced;

  symmetry = NULL;
  symmetry_reduced = NULL;
  pure_trans_reduced = NULL;

  multi = pure_trans->size;

  /* Wrap each pure translation as a symmetry operation with identity */
  /* rotation so that reduce_operation() can filter them. */
  if ((symmetry = sym_alloc_symmetry(multi)) == NULL) {
    return NULL;
  }
  for (i = 0; i < multi; i++) {
    mat_copy_matrix_i3(symmetry->rot[i], identity);
    mat_copy_vector_d3(symmetry->trans[i], pure_trans->vec[i]);
  }

  if ((symmetry_reduced =
       reduce_operation(cell, symmetry, symprec, angle_tolerance)) == NULL) {
    sym_free_symmetry(symmetry);
    symmetry = NULL;
    return NULL;
  }
  /* The temporary wrapper is no longer needed. */
  sym_free_symmetry(symmetry);
  symmetry = NULL;

  /* Copy the surviving translation parts back into a plain VecDBL. */
  multi = symmetry_reduced->size;
  if ((pure_trans_reduced = mat_alloc_VecDBL(multi)) == NULL) {
    sym_free_symmetry(symmetry_reduced);
    symmetry_reduced = NULL;
    return NULL;
  }
  for (i = 0; i < multi; i++) {
    mat_copy_vector_d3(pure_trans_reduced->vec[i], symmetry_reduced->trans[i]);
  }
  sym_free_symmetry(symmetry_reduced);
  symmetry_reduced = NULL;

  return pure_trans_reduced;
}
/* 1) Pointgroup operations of the primitive cell are obtained. */
/*    These are constrained by the input cell lattice pointgroup, */
/*    i.e., even if the lattice of the primitive cell has higher */
/*    symmetry than that of the input cell, it is not considered. */
/* 2) Spacegroup operations are searched for the primitive cell */
/*    using the constrained point group operations. */
/* 3) The spacegroup operations for the primitive cell are */
/*    transformed to those of original input cells, if the input cell */
/*    was not a primitive cell. */
static Symmetry * get_operations(const Cell *primitive,
                                 const double symprec,
                                 const double angle_symprec)
{
  PointSymmetry lattice_sym;

  debug_print("get_operations:\n");

  /* Point group of the lattice first; an empty result means failure. */
  lattice_sym = get_lattice_symmetry(primitive->lattice,
                                     symprec,
                                     angle_symprec);
  if (lattice_sym.size == 0) {
    return NULL;
  }

  /* Attach translation parts; NULL is propagated on failure. */
  return get_space_group_operations(&lattice_sym, primitive, symprec);
}
/* Keep only the operations of `symmetry` whose rotation part belongs to */
/* the point group of the primitive lattice and which map every atom of */
/* the primitive cell onto an equivalent atom within `symprec`. */
/* Return NULL if failed */
static Symmetry * reduce_operation(const Cell * primitive,
                                   const Symmetry * symmetry,
                                   const double symprec,
                                   const double angle_symprec)
{
  int i, j, num_sym;
  Symmetry * sym_reduced;
  PointSymmetry point_symmetry;
  MatINT *rot;
  VecDBL *trans;

  debug_print("reduce_operation:\n");

  sym_reduced = NULL;
  rot = NULL;
  trans = NULL;

  /* Point group allowed by the lattice alone. */
  point_symmetry = get_lattice_symmetry(primitive->lattice,
                                        symprec,
                                        angle_symprec);
  if (point_symmetry.size == 0) {
    return NULL;
  }

  /* Scratch arrays sized for the worst case (all operations survive). */
  if ((rot = mat_alloc_MatINT(symmetry->size)) == NULL) {
    return NULL;
  }
  if ((trans = mat_alloc_VecDBL(symmetry->size)) == NULL) {
    mat_free_MatINT(rot);
    rot = NULL;
    return NULL;
  }

  num_sym = 0;
  for (i = 0; i < point_symmetry.size; i++) {
    for (j = 0; j < symmetry->size; j++) {
      /* Rotation part must be an element of the lattice point group. */
      if (mat_check_identity_matrix_i3(point_symmetry.rot[i],
                                       symmetry->rot[j])) {
        /* is_identity = 0: the rotation is applied during the overlap */
        /* check. */
        if (is_overlap_all_atoms(symmetry->trans[j],
                                 symmetry->rot[j],
                                 primitive,
                                 symprec,
                                 0)) {
          mat_copy_matrix_i3(rot->mat[num_sym], symmetry->rot[j]);
          mat_copy_vector_d3(trans->vec[num_sym], symmetry->trans[j]);
          num_sym++;
        }
      }
    }
  }

  /* Pack the surviving operations into a right-sized Symmetry. */
  if ((sym_reduced = sym_alloc_symmetry(num_sym)) != NULL) {
    for (i = 0; i < num_sym; i++) {
      mat_copy_matrix_i3(sym_reduced->rot[i], rot->mat[i]);
      mat_copy_vector_d3(sym_reduced->trans[i], trans->vec[i]);
    }
  }

  mat_free_MatINT(rot);
  rot = NULL;
  mat_free_VecDBL(trans);
  trans = NULL;

  return sym_reduced;
}
/* Look for the translations which satisfy the input symmetry operation. */
/* This function is heaviest in this code. */
/* Return NULL if failed */
static VecDBL * get_translation(SPGCONST int rot[3][3],
                                const Cell *cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, min_atom_index, num_trans;
  int *is_found;
  double origin[3];
  VecDBL *trans;

  debug_print("get_translation (tolerance = %f):\n", symprec);

  num_trans = 0;
  is_found = NULL;
  trans = NULL;

#ifdef _OPENMP
  int num_min_type_atoms;
  int *min_type_atoms;
  double vec[3];
  min_type_atoms = NULL;
#endif

  /* is_found[i] == 1 marks atom i whose offset from `origin` is a valid */
  /* translation (maps every atom onto an equivalent one). */
  if ((is_found = (int*) malloc(sizeof(int)*cell->size)) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return NULL;
  }
  for (i = 0; i < cell->size; i++) {
    is_found[i] = 0;
  }

  /* Look for the atom index with least number of atoms within same type */
  min_atom_index = get_index_with_least_atoms(cell);
  if (min_atom_index == -1) {
    debug_print("spglib: get_index_with_least_atoms failed.\n");
    goto ret;
  }

  /* Set min_atom_index as the origin to measure the distance between atoms. */
  mat_multiply_matrix_vector_id3(origin, rot, cell->position[min_atom_index]);

#ifdef _OPENMP
  if (cell->size < NUM_ATOMS_CRITERION_FOR_OPENMP) {
    /* Small cells: serial search to avoid thread startup overhead. */
    num_trans = search_translation_part(is_found,
                                        cell,
                                        rot,
                                        min_atom_index,
                                        origin,
                                        symprec,
                                        is_identity);
    if (num_trans == 0) {
      goto ret;
    }
  } else {
    /* Collect indices of atoms with the type where the minimum number */
    /* of atoms belong. */
    if ((min_type_atoms = (int*) malloc(sizeof(int)*cell->size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      goto ret;
    }
    num_min_type_atoms = 0;
    for (i = 0; i < cell->size; i++) {
      if (cell->types[i] == cell->types[min_atom_index]) {
        min_type_atoms[num_min_type_atoms] = i;
        num_min_type_atoms++;
      }
    }
    /* Candidate translations are tested independently; `vec` is scratch */
    /* per iteration and must be private. */
#pragma omp parallel for private(j, vec)
    for (i = 0; i < num_min_type_atoms; i++) {
      for (j = 0; j < 3; j++) {
        vec[j] = cell->position[min_type_atoms[i]][j] - origin[j];
      }
      if (is_overlap_all_atoms(vec,
                               rot,
                               cell,
                               symprec,
                               is_identity)) {
        is_found[min_type_atoms[i]] = 1;
      }
    }
    free(min_type_atoms);
    min_type_atoms = NULL;
    for (i = 0; i < cell->size; i++) {
      num_trans += is_found[i];
    }
  }
#else
  num_trans = search_translation_part(is_found,
                                      cell,
                                      rot,
                                      min_atom_index,
                                      origin,
                                      symprec,
                                      is_identity);
  if (num_trans == 0) {
    goto ret;
  }
#endif

  /* Pack the marked offsets into `trans`, wrapping each component to the */
  /* image nearest zero with mat_Nint. */
  if ((trans = mat_alloc_VecDBL(num_trans)) == NULL) {
    goto ret;
  }
  k = 0;
  for (i = 0; i < cell->size; i++) {
    if (is_found[i]) {
      for (j = 0; j < 3; j++) {
        trans->vec[k][j] = cell->position[i][j] - origin[j];
        trans->vec[k][j] -= mat_Nint(trans->vec[k][j]);
      }
      k++;
    }
  }

 ret:
  free(is_found);
  is_found = NULL;
  /* NULL when no translation was found or an allocation failed. */
  return trans;
}
/* Serial scan over atoms of the same type as `min_atom_index`: mark in */
/* lat_point_atoms[] each atom whose offset from `origin` is a valid */
/* translation for `rot`, and return how many were marked. */
static int search_translation_part(int lat_point_atoms[],
                                   const Cell * cell,
                                   SPGCONST int rot[3][3],
                                   const int min_atom_index,
                                   const double origin[3],
                                   const double symprec,
                                   const int is_identity)
{
  int i, j, count;
  double trial[3];

  count = 0;
  for (i = 0; i < cell->size; i++) {
    /* Only atoms of the reference type can be images of the origin. */
    if (cell->types[i] == cell->types[min_atom_index]) {
      for (j = 0; j < 3; j++) {
        trial[j] = cell->position[i][j] - origin[j];
      }
      if (is_overlap_all_atoms(trial, rot, cell, symprec, is_identity)) {
        lat_point_atoms[i] = 1;
        count++;
      }
    }
  }

  return count;
}
/* Return 1 when the operation (rot, trans) maps every atom of `cell` */
/* onto an atom of the same type within `symprec` (Cartesian distance), */
/* otherwise 0.  is_identity skips the matrix product for pure */
/* translations. */
static int is_overlap_all_atoms(const double trans[3],
                                SPGCONST int rot[3][3],
                                const Cell * cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, found;
  double moved[3], frac_diff[3], cart_diff[3];

  for (i = 0; i < cell->size; i++) {
    /* Image of atom i under the candidate operation. */
    if (is_identity) { /* Identity matrix is treated as special for speed-up. */
      for (j = 0; j < 3; j++) {
        moved[j] = cell->position[i][j] + trans[j];
      }
    } else {
      mat_multiply_matrix_vector_id3(moved,
                                     rot,
                                     cell->position[i]);
      for (j = 0; j < 3; j++) {
        moved[j] += trans[j];
      }
    }

    /* Search for an equivalent atom of the same type, comparing */
    /* periodic images via the nearest-integer wrap. */
    found = 0;
    for (j = 0; j < cell->size; j++) {
      if (cell->types[j] != cell->types[i]) {
        continue;
      }
      /* here cel_is_overlap can be used, but for the tuning */
      /* purpose, write it again */
      for (k = 0; k < 3; k++) {
        frac_diff[k] = moved[k] - cell->position[j][k];
        frac_diff[k] -= mat_Nint(frac_diff[k]);
      }
      mat_multiply_matrix_vector_d3(cart_diff, cell->lattice, frac_diff);
      if (sqrt(cart_diff[0] * cart_diff[0] +
               cart_diff[1] * cart_diff[1] +
               cart_diff[2] * cart_diff[2]) < symprec) {
        found = 1;
        break;
      }
    }
    if (!found) {
      return 0; /* not found */
    }
  }

  return 1; /* found */
}
/* Return the index of the first atom of the least-populated atom type, */
/* or -1 on allocation failure. */
static int get_index_with_least_atoms(const Cell *cell)
{
  int i, j, best, best_count;
  int *counts;

  counts = NULL;

  if ((counts = (int *) malloc(sizeof(int) * cell->size)) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return -1;
  }
  for (i = 0; i < cell->size; i++) {
    counts[i] = 0;
  }

  /* For each atom, bump the counter of the FIRST atom sharing its type, */
  /* so counts[j] > 0 only on type representatives and then equals the */
  /* population of that type. */
  for (i = 0; i < cell->size; i++) {
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        counts[j]++;
        break;
      }
    }
  }

  /* Pick the representative with the smallest non-zero population. */
  best = 0;
  best_count = counts[0];
  for (i = 1; i < cell->size; i++) {
    if (counts[i] > 0 && counts[i] < best_count) {
      best_count = counts[i];
      best = i;
    }
  }

  free(counts);
  counts = NULL;

  return best;
}
/* For every rotation in `lattice_sym`, search the compatible translation */
/* parts and flatten the (rotation, translation) pairs into one Symmetry */
/* list.  Return NULL if failed. */
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
                           const Cell *primitive,
                           const double symprec)
{
  int i, j, num_sym, total_num_sym;
  VecDBL **trans;
  Symmetry *symmetry;

  debug_print("get_space_group_operations (tolerance = %f):\n", symprec);

  trans = NULL;
  symmetry = NULL;

  /* trans[i] holds the translations found for rotation i (NULL if none). */
  if ((trans = (VecDBL**) malloc(sizeof(VecDBL*) * lattice_sym->size))
      == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return NULL;
  }
  for (i = 0; i < lattice_sym->size; i++) {
    trans[i] = NULL;
  }

  total_num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    if ((trans[i] = get_translation(lattice_sym->rot[i], primitive, symprec, 0))
        != NULL) {
      debug_print(" match translation %d/%d; tolerance = %f\n",
                  i + 1, lattice_sym->size, symprec);
      total_num_sym += trans[i]->size;
    }
  }

  if ((symmetry = sym_alloc_symmetry(total_num_sym)) == NULL) {
    goto ret;
  }

  /* Flatten: each rotation is paired with each of its translations. */
  num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    if (trans[i] == NULL) {
      continue;
    }
    for (j = 0; j < trans[i]->size; j++) {
      mat_copy_vector_d3(symmetry->trans[num_sym + j], trans[i]->vec[j]);
      mat_copy_matrix_i3(symmetry->rot[num_sym + j], lattice_sym->rot[i]);
    }
    num_sym += trans[i]->size;
  }

 ret:
  /* Per-rotation translation lists are no longer needed. */
  for (i = 0; i < lattice_sym->size; i++) {
    if (trans[i] != NULL) {
      mat_free_VecDBL(trans[i]);
      trans[i] = NULL;
    }
  }
  free(trans);
  trans = NULL;

  return symmetry;
}
/* Build the point group of the lattice: all integer axis matrices (rows */
/* drawn from `relative_axes`, determinant +/-1) that leave the metric */
/* tensor of the Delaunay-reduced lattice invariant within the given */
/* tolerances.  lattice_sym.size == 0 signals failure. */
static PointSymmetry get_lattice_symmetry(SPGCONST double cell_lattice[3][3],
                                          const double symprec,
                                          const double angle_symprec)
{
  int i, j, k, attempt, num_sym;
  double angle_tol;
  int axes[3][3];
  double lattice[3][3], min_lattice[3][3];
  double metric[3][3], metric_orig[3][3];
  PointSymmetry lattice_sym;

  debug_print("get_lattice_symmetry:\n");

  lattice_sym.size = 0;

  if (! del_delaunay_reduce(min_lattice, cell_lattice, symprec)) {
    goto err;
  }

  mat_get_metric(metric_orig, min_lattice);
  angle_tol = angle_symprec;

  for (attempt = 0; attempt < NUM_ATTEMPT; attempt++) {
    num_sym = 0;
    /* Enumerate all 26^3 candidate axis triplets; only unimodular */
    /* matrices (det = +/-1) are valid lattice transformations. */
    for (i = 0; i < 26; i++) {
      for (j = 0; j < 26; j++) {
        for (k = 0; k < 26; k++) {
          set_axes(axes, i, j, k);
          if (! ((mat_get_determinant_i3(axes) == 1) ||
                 (mat_get_determinant_i3(axes) == -1))) {
            continue;
          }
          mat_multiply_matrix_di3(lattice, min_lattice, axes);
          mat_get_metric(metric, lattice);
          if (is_identity_metric(metric, metric_orig, symprec, angle_tol)) {
            /* A lattice point group has at most 48 elements; finding a */
            /* 49th match means the angle tolerance is too loose, so */
            /* tighten it and restart the enumeration. */
            if (num_sym > 47) {
              angle_tol *= ANGLE_REDUCE_RATE;
              warning_print("spglib: Too many lattice symmetries was found.\n");
              warning_print(" Reduce angle tolerance to %f", angle_tol);
              warning_print(" (line %d, %s).\n", __LINE__, __FILE__);
              goto next_attempt;
            }
            mat_copy_matrix_i3(lattice_sym.rot[num_sym], axes);
            num_sym++;
          }
        }
      }
    }
    if (num_sym < 49 || angle_tol < 0) {
      lattice_sym.size = num_sym;
      /* Map the operations from the reduced lattice back to the axes of */
      /* the original input lattice. */
      return transform_pointsymmetry(&lattice_sym, cell_lattice, min_lattice);
    }
  next_attempt:
    ;
  }

 err:
  debug_print("get_lattice_symmetry failed.\n");
  return lattice_sym;
}
/* Return 1 when the two metric tensors describe the same lattice within
   tolerance, otherwise 0.  Basis-vector lengths (diagonal elements) are
   compared against symprec.  Angles are compared against angle_symprec
   when it is positive; otherwise the angular difference is converted to
   a distance -- sin(dtheta) scaled by the average vector lengths -- and
   compared against symprec. */
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
                              SPGCONST double metric_orig[3][3],
                              const double symprec,
                              const double angle_symprec)
{
  int i, j, k;
  int elem_sets[3][2] = {{0, 1},
                         {0, 2},
                         {1, 2}};
  double cos1, cos2, x, length_ave2, sin_dtheta2;
  double length_orig[3], length_rot[3];

  /* Compare basis-vector lengths. */
  for (i = 0; i < 3; i++) {
    length_orig[i] = sqrt(metric_orig[i][i]);
    length_rot[i] = sqrt(metric_rotated[i][i]);
    if (mat_Dabs(length_orig[i] - length_rot[i]) > symprec) {
      goto fail;
    }
  }

  /* Compare the three inter-axial angles. */
  for (i = 0; i < 3; i++) {
    j = elem_sets[i][0];
    k = elem_sets[i][1];
    if (angle_symprec > 0) {
      if (mat_Dabs(get_angle(metric_orig, j, k) -
                   get_angle(metric_rotated, j, k)) > angle_symprec) {
        goto fail;
      }
    } else {
      /* dtheta = arccos(cos(theta1) - arccos(cos(theta2))) */
      /* = arccos(c1) - arccos(c2) */
      /* = arccos(c1c2 + sqrt((1-c1^2)(1-c2^2))) */
      /* sin(dtheta) = sin(arccos(x)) = sqrt(1 - x^2) */
      cos1 = metric_orig[j][k] / length_orig[j] / length_orig[k];
      cos2 = metric_rotated[j][k] / length_rot[j] / length_rot[k];
      /* Rounding can push |cos| marginally above 1; sqrt(1 - cos^2)
         would then be NaN, every subsequent comparison false, and the
         angle check silently disabled.  Clamp to the valid domain. */
      if (cos1 > 1) { cos1 = 1; } else if (cos1 < -1) { cos1 = -1; }
      if (cos2 > 1) { cos2 = 1; } else if (cos2 < -1) { cos2 = -1; }
      x = cos1 * cos2 + sqrt(1 - cos1 * cos1) * sqrt(1 - cos2 * cos2);
      sin_dtheta2 = 1 - x * x;
      length_ave2 = ((length_orig[j] + length_rot[j]) *
                     (length_orig[k] + length_rot[k])) / 4;
      if (sin_dtheta2 > 1e-12) {
        if (sin_dtheta2 * length_ave2 > symprec * symprec) {
          goto fail;
        }
      }
    }
  }

  return 1;

fail:
  return 0;
}
/* Angle, in degrees, between basis vectors i and j as encoded in the
   metric tensor: cos(theta) = G[i][j] / (|a_i| |a_j|). */
static double get_angle(SPGCONST double metric[3][3],
                        const int i,
                        const int j)
{
  const double norm_i = sqrt(metric[i][i]);
  const double norm_j = sqrt(metric[j][j]);
  const double cos_angle = metric[i][j] / norm_i / norm_j;

  return acos(cos_angle) / PI * 180;
}
/* Re-express the point-symmetry operations of original_lattice in the
   basis of new_lattice.  Operations that do not come out as integer
   matrices in the new basis are dropped; a non-unimodular result aborts
   and returns an empty (size 0) PointSymmetry. */
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * lat_sym_orig,
                        SPGCONST double new_lattice[3][3],
                        SPGCONST double original_lattice[3][3])
{
  int i, size;
  double trans_mat[3][3], inv_mat[3][3], drot[3][3];
  PointSymmetry lat_sym_new;

  lat_sym_new.size = 0;

  /* trans_mat = original_lattice^-1 * new_lattice; each rotation is then
     conjugated by trans_mat (presumably R' = T^-1 R T -- confirm against
     mat_get_similar_matrix_d3). */
  mat_inverse_matrix_d3(inv_mat, original_lattice, 0);
  mat_multiply_matrix_d3(trans_mat, inv_mat, new_lattice);

  size = 0;
  for (i = 0; i < lat_sym_orig->size; i++) {
    mat_cast_matrix_3i_to_3d(drot, lat_sym_orig->rot[i]);
    mat_get_similar_matrix_d3(drot, drot, trans_mat, 0);

    /* new_lattice may have lower point symmetry than original_lattice.*/
    /* The operations that have non-integer elements are not counted. */
    if (mat_is_int_matrix(drot, mat_Dabs(mat_get_determinant_d3(trans_mat)) / 10)) {
      mat_cast_matrix_3d_to_3i(lat_sym_new.rot[size], drot);
      if (abs(mat_get_determinant_i3(lat_sym_new.rot[size])) != 1) {
        warning_print("spglib: A point symmetry operation is not unimodular.");
        warning_print("(line %d, %s).\n", __LINE__, __FILE__);
        goto err;
      }
      size++;
    }
  }

#ifdef SPGWARNING
  if (! (lat_sym_orig->size == size)) {
    warning_print("spglib: Some of point symmetry operations were dropped.");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
  }
#endif

  lat_sym_new.size = size;
  return lat_sym_new;

err:
  /* lat_sym_new.size is still 0 here: failure is signalled to the caller. */
  return lat_sym_new;
}
/* Fill the three columns of axes with the a1-th, a2-th and a3-th entries
   of the tabulated relative_axes directions. */
static void set_axes(int axes[3][3],
                     const int a1, const int a2, const int a3)
{
  int row;

  for (row = 0; row < 3; row++) {
    axes[row][0] = relative_axes[a1][row];
    axes[row][1] = relative_axes[a2][row];
    axes[row][2] = relative_axes[a3][row];
  }
}
|
prob2.c | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
/*
 * Compare three ways of summing 0..n-1 with OpenMP:
 *   1. reduction(+:...)                  -- the idiomatic solution;
 *   2. a critical section per iteration  -- deliberately bad, effectively
 *      sequential with locking overhead on top;
 *   3. per-thread partial sums merged once per thread under a critical
 *      section -- a hand-rolled reduction.
 *
 * Timing uses omp_get_wtime() (wall clock).  clock() must NOT be used
 * here: it accumulates CPU time across *all* OpenMP threads, so the
 * parallel variants would misleadingly appear slower than they are.
 */
int32_t main()
{
    size_t n = 10000000;
    double tic, toc;

    /* 1. The usual reduction. */
    tic = omp_get_wtime();
    size_t sum1 = 0;
#pragma omp parallel for num_threads( 8 ) reduction(+:sum1)
    for( size_t i = 0; i < n; ++i )
    {
        sum1 += i;
    }
    toc = omp_get_wtime();
    double elapsed = toc - tic;
    printf( "reduced sum: %zu\n", sum1 );
    printf( "elapsed time: %fs\n", elapsed );

    /* 2. A naive use of #pragma omp critical. */
    tic = omp_get_wtime();
    size_t sum2 = 0;
    size_t local_sum = 0;
#pragma omp parallel for num_threads( 8 ) private( local_sum )
    for( size_t i = 0; i < n; ++i )
    {
        local_sum = i;
        // This is wrong. It's essentially sequential with a lot of overhead.
#pragma omp critical
        sum2 += local_sum;
    }
    toc = omp_get_wtime();
    elapsed = toc - tic;
    printf( "naive sum: %zu\n", sum2 );
    printf( "elapsed time: %fs\n", elapsed );

    /* 3. The better way: accumulate privately, merge once per thread. */
    tic = omp_get_wtime();
    size_t sum3 = 0;
#pragma omp parallel num_threads( 8 )
    {
        size_t local_sum = 0;
#pragma omp for
        for(size_t i = 0; i < n; ++i )
        {
            local_sum += i;
        }
        /* One critical entry per thread (8 total), not one per iteration. */
#pragma omp critical
        sum3 += local_sum;
    }
    toc = omp_get_wtime();
    elapsed = toc - tic;
    printf( "critical sum: %zu\n", sum3 );
    printf( "elapsed time: %fs\n", elapsed );

    return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
/* NOTE(review): presumably the sampling quantum used when flattening
   Bezier curves into line segments -- confirm at the TraceBezier call
   sites. */
#define BezierQuantum 200
/* Slack added when growing the primitive-info array. */
#define PrimitiveExtentPad 128
/* Hard upper bound on coordinates generated for a Bezier path. */
#define MaxBezierCoordinates 4194304
/* Report a malformed point token in a drawing primitive, set status to
   MagickFalse, and break out of the enclosing loop/switch.  Note the
   embedded `break' -- only usable inside a breakable construct. */
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
/* One y-monotonic edge of a polygon, in the sorted rendering form
   produced by ConvertPathToPolygon(). */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;            /* bounding box of the edge's points */

  double
    scanline;          /* rendering cursor; set to -1.0 at build time */

  PointInfo
    *points;           /* vertex list owned by this edge */

  size_t
    number_points;     /* entries in points */

  ssize_t
    direction;         /* 1 when the edge runs in increasing y, else 0 */

  MagickBooleanType
    ghostline;         /* edge synthesized to close an open subpath */

  size_t
    highwater;         /* scan-start hint; initialized to 0 -- usage is
                          outside this chunk, confirm at render site */
} EdgeInfo;
/* Geometric element parameters: center (cx,cy), major/minor axes and a
   rotation angle.  NOTE(review): not referenced in this chunk; field
   meanings inferred from names -- confirm at use sites. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;
/* Parse state threaded through the Trace*() helpers (see the forward
   declarations): the growable primitive array lives with the caller,
   this struct carries its address, capacity, and write position. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;  /* address of the caller's primitive array */

  size_t
    *extent;           /* address of that array's capacity */

  ssize_t
    offset;            /* next write position */

  PointInfo
    point;             /* current point -- presumably the pen position;
                          confirm in TracePath */

  ExceptionInfo
    *exception;        /* destination for drawing errors */
} MVGInfo;
/* A polygon as a sorted array of y-monotonic edges (the efficient
   rendering form built by ConvertPathToPolygon). */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;            /* owned; each edge also owns its point list */

  size_t
    number_edges;
} PolygonInfo;
/* Vertex classification within a vector path (see
   ConvertPrimitiveToPath):
     MoveToCode    -- start of a (closed) subpath
     OpenCode      -- start of a subpath later found to be open
     GhostlineCode -- start of the invisible segment that closes an
                      open subpath
     LineToCode    -- interior vertex
     EndCode       -- array terminator */
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;
/* One vector-path vertex: a point plus its path code. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
/*
  Allocate a DrawInfo structure and initialize it to default values via
  GetDrawInfo() with no image info.
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  Deep-copy draw_info into a freshly allocated, default-initialized
  DrawInfo: strings are cloned, pattern/mask images are cloned, the
  dash-pattern array and gradient stops are duplicated, and scalar
  fields are copied by assignment.  A NULL draw_info yields the default
  DrawInfo unchanged.
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /* Heap-allocated strings: clone, never alias. */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* Pattern images get full deep copies (0,0 = keep dimensions). */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash pattern is terminated by a (near-)zero entry; count
         the terminator into the copy. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (x+1),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): reuses the dash-pattern message tag for a
           gradient-stop allocation failure -- presumably a copy/paste;
           confirm against the message catalog. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* debug reflects the *current* logging state, not the source's flag. */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator ordering edges for a right-handed coordinate
  system: by start y, then start x, then by the cross product of the
  initial segment directions, then end y, then end x.  The helper macro
  returns from the *enclosing function* on the first unequal key.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  Reverse a point array in place by swapping from both ends toward the
  middle.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  register ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
/*
  Convert a vector path into a PolygonInfo: the path is split into
  y-monotonic edges (a new edge starts whenever the y direction flips or
  a new subpath begins), downward-running point lists are reversed, per-
  edge x bounds are tracked, and finally the edges are sorted with
  DrawCompareEdges().  Returns NULL on allocation failure.

  NOTE(review): on the allocation-failure paths below, previously
  acquired memory (polygon_info, edges, points) is not explicitly
  released before returning NULL -- whether ResizeQuantumMemory frees
  its argument on failure is not visible here; audit for leaks.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,          /* y direction of the edge being built: -1, 0, +1 */
    next_direction;

  PointInfo
    point,
    *points;            /* point list of the edge under construction */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;                  /* points accumulated in the current edge */

  MagickBooleanType
    ghostline;

  size_t
    edge,               /* index of the edge being built */
    number_edges,       /* capacity of the edges array */
    number_points;      /* capacity of the points array */

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* Pre-initialize slot 0 so a degenerate path still leaves the array
     in a consistent state. */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge in progress, then start a new point
          list at this point.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* Edges always store points in increasing-y order. */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    /* Ties in y are broken by x so horizontal runs get a direction. */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the y direction flipped, so close the current edge
          and seed the next one with the shared endpoint.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point list geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final edge (a single point is not an edge). */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges for the scanline renderer. */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Dump a vector path to the draw event log, one line per vertex with a
  human-readable name for its path code.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  const char
    *name;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    switch (p->code)
    {
      case GhostlineCode: name="moveto ghostline"; break;
      case OpenCode: name="moveto open"; break;
      case MoveToCode: name="moveto"; break;
      case LineToCode: name="lineto"; break;
      default: name="?"; break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,name);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  Convert a PrimitiveInfo array into a PathInfo vector path: each
  subpath opens with a MoveTo, consecutive duplicate points are
  elided, and an unclosed subpath is patched with a Ghostline segment
  back to its start (and its opening vertex re-marked OpenCode).
  Non-path primitives (alpha/color/image/point/text) yield NULL, as
  does allocation failure.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,                  /* first point of the current subpath */
    q;                  /* most recently emitted point */

  register ssize_t
    i,
    n;                  /* vertices emitted so far */

  ssize_t
    coordinates,        /* points remaining in the current subpath */
    start;              /* index of the current subpath's opening vertex */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Worst case: every input point plus a ghostline pair per subpath,
     bounded by 3 entries per point plus the terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: re-tag the
      opening vertex and append an invisible (ghostline) segment back to
      the start so the polygon can still be filled.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Shrink to the n+1 entries actually used. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  Free every resource owned by draw_info -- strings, pattern and mask
  images, the dash-pattern array, the gradient stops -- invalidate the
  signature, and release the structure itself.  Returns the relinquished
  pointer (NULL per MagickCore convention) so callers can write
  "draw_info=DestroyDrawInfo(draw_info);".
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Catch double-destroy / stale pointers before freeing anything. */
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Poison the signature so use-after-free trips the assert above. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  Release edge `edge' of polygon_info: free its point list, close the
  gap in the edge array, and return the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  remaining=polygon_info->number_edges-edge;
  if (remaining > 0)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      remaining*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    j;

  /*
    Release every edge's point list before freeing the edge table and the
    polygon structure itself; always returns NULL for caller reassignment.
  */
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++)
    polygon_info->edges[j].points=(PointInfo *) RelinquishMagickMemory(
      polygon_info->edges[j].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Clip the scanline span [edge->x1,edge->x2] at height y against the
    half-planes where the affine transform maps (x,y) inside the source
    image.  On a miss, x2 is set below x1 so the caller can skip the row.
  */
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the transformed column coordinate at x == 0 for this scanline.  */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* sx > 0: left bound at column 0, right bound at image->columns.  */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* sx < 0: the two intercepts swap roles.  */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the column is constant along x; reject if out of range.  */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* rx ~ 0: the row is constant along x; reject if out of range.  */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal_determinant;

  /*
    Invert the 2x2 linear part via the adjugate scaled by 1/det (using a
    perceptible reciprocal so a singular matrix cannot divide by zero), then
    derive the translation that maps transformed points back to the source.
  */
  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=reciprocal_determinant*affine->sy;
  inverse_affine.sy=reciprocal_determinant*affine->sx;
  inverse_affine.rx=reciprocal_determinant*(-affine->rx);
  inverse_affine.ry=reciprocal_determinant*(-affine->ry);
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* Forward-transform the four source corners to find the destination
     bounding quadrilateral.  */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Clamp the bounding box to the destination image's extent.  */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* Clip this scanline against the source footprint; an inverted span
       means the row does not intersect the source.  */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Map the destination pixel back into source coordinates, sample the
         source there, and composite it over the destination pixel.  */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    limit;

  /*
    Clamp the stroke width to a maximum proportional to the image's largest
    dimension so an absurd width cannot blow up later computations.
  */
  limit=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows);
  return(MagickMin((double) draw_info->stroke_width,limit));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Debug aid: outline each polygon edge's bounding box (red for one edge
    direction, green for the other) and the overall bounds (blue).
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI unless the draw info carries a density string.  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* Half the effective stroke width, used to inflate each rectangle.  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image.  */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge, stroke color keyed on edge direction.  */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      /* Early exit when the per-edge loop broke on an error.  */
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Finally draw the overall bounding rectangle in blue.  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  /*
    Look up the clip path registered as an image artifact under the given
    id, render it into a mask image, and install that mask as the image's
    write pixel mask.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* Render onto a fresh, fully-transparent canvas the size of the image.
     Intermediate statuses are intentionally best-effort (overwritten).  */
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the path with opaque white fill and a transparent stroke, in
     clip-path mode.  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Reduce the rendering to a grayscale mask: extract alpha, then negate;
     on negate failure the mask is freed and NULL is returned.  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* Render onto a fresh, fully-transparent canvas the size of the image.
     Intermediate statuses are intentionally best-effort (overwritten).  */
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* Render the path with opaque white fill and a transparent stroke.  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* Reduce the rendering to a grayscale mask: extract alpha, then negate;
     on negate failure the mask is freed and NULL is returned.  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  /* Count the vertices of the input path (terminated by UndefinedPrimitive)
     and allocate a scratch polygon large enough for any dash segment.  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  /* Consume the dash offset: walk the dash pattern (n indexes it; odd n is
     a gap) until the offset is exhausted, leaving `length` as the remaining
     portion of the current dash entry.  */
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /* Walk each path segment, emitting a stroke for every "on" dash run.  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* Current dash entry exhausted exactly at a vertex: advance it,
           wrapping at a zero-length pattern terminator.  */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* End of a gap: start a new dash at this point.  */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* End of a dash: close the segment and stroke it.  */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unused remainder of the dash entry into the next segment.  */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* Flush a final partial dash, nudged by epsilon so it rasterizes.  */
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      const SegmentInfo
        *gradient_vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        u,
        w;

      /*
        Project the point (x,y) onto the gradient vector and normalize so
        the result is 0 at the vector origin and 1 at its tip.
      */
      gradient_vector=(&gradient->gradient_vector);
      u.x=gradient_vector->x2-gradient_vector->x1;
      u.y=gradient_vector->y2-gradient_vector->y1;
      w.x=(double) x-gradient_vector->x1;
      w.y=(double) y-gradient_vector->y1;
      length=sqrt(w.x*w.x+w.y*w.y);
      gamma=sqrt(u.x*u.x+u.y*u.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=u.x*w.x+u.y*w.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        delta;

      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread uses the raw Euclidean distance from the center.
          */
          delta.x=(double) x-gradient->center.x;
          delta.y=(double) y-gradient->center.y;
          return(sqrt(delta.x*delta.x+delta.y*delta.y));
        }
      /*
        Otherwise rotate into the gradient's frame and divide by the radii
        so the iso-contours form the requested ellipse.
      */
      delta.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      delta.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(delta.x*delta.x+delta.y*delta.y));
    }
  }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *alpha,
    *beta;

  /*
    qsort() comparator: order gradient stops by ascending offset, treating
    offsets within MagickEpsilon of each other as equal.
  */
  alpha=(const StopInfo *) x;
  beta=(const StopInfo *) y;
  if (alpha->offset > beta->offset)
    return(1);
  if (fabs(alpha->offset-beta->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* Sort the stops by offset so the interval search below is valid.  */
  gradient=(&draw_info->gradient);
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the offset at x == 0; linear offsets are normalized by the
       gradient vector length.  */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp offsets outside [0,1] to the first/last stop.  */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the bracketing stops i-1 and i.  */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: mirror the offset on every other repetition.  */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /* Repeat: wrap the offset each period (vector length for linear,
             radius for radial) and antialias the seam.  */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* At the seam, blend first and last stops instead.  */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.  The requested
    extent is computed in double precision so the overflow guard against
    SSIZE_MAX and the memory-request policy can run before any cast back to
    size_t.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          *mvg_info->extent=(size_t) extent;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  /* Leave a small zeroed buffer in place so callers can unwind safely.  */
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
/*
  GetMVGMacros() scans an MVG primitive string for named push blocks of the
  form: push <type> "name" ... pop <type> (e.g. push graphic-context
  "wheel"), and returns a splay-tree mapping each macro name to the MVG text
  between the push and its matching pop.  Returns NULL when 'primitive' is
  NULL.  The caller owns the returned tree; keys and values are freed by the
  tree's RelinquishMagickMemory destructors.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    'macro' and 'token' are sized from the full primitive string, so any
    single token or extracted macro body is guaranteed to fit.
  */
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;
        GetNextToken(q,&q,extent,token);
        /* only quoted names define macros; unquoted pushes are skipped */
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting depth; 0 means balanced */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* candidate end: rewind to just before this "pop" token */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  /*
                    NOTE(review): the copy length (end-start) deliberately
                    excludes the trailing "pop" keyword located at 'end';
                    CopyMagickString NUL-terminates within that length.
                  */
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
/*
  IsPoint() returns MagickTrue when the string 'point' begins with a
  parsable numeric value, MagickFalse otherwise.  A string from which
  StringToDouble() consumes nothing (end pointer unchanged) and that yields
  an effectively-zero value is rejected.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *terminator;
  double
    value;
  value=StringToDouble(point,&terminator);
  if ((terminator == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  TracePoint() records 'point' into 'primitive_info' as a single-coordinate,
  open (non-closed) primitive.  Always returns MagickTrue.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(status == 0 ? MagickFalse : MagickTrue);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (draw_info->compliance != SVGCompliance)
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
const char
*clip_path;
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
clip_path=(const char *) GetValueFromSplayTree(macros,name);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,name,clip_path);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (MaxBezierCoordinates/4))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
  DrawImage() renders the MVG drawing primitives carried in draw_info onto
  the image.  It is a thin public entry point that delegates to
  RenderMVGContent() at recursion depth 0.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image, returned to the caller.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawPatternPath() renders the named pattern's MVG path onto a freshly
  allocated canvas whose size is taken from the pattern's geometry artifact.
  Returns MagickTrue on success; MagickFalse when the pattern path or its
  geometry is not registered as an image artifact.
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *geometry,
    *gradient_type,
    *path;

  DrawInfo
    *pattern_draw_info;

  ImageInfo
    *canvas_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG path and its geometry are stored as image artifacts.
  */
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Allocate a canvas sized by the pattern geometry with a fully
    transparent background.
  */
  canvas_info=AcquireImageInfo();
  canvas_info->size=AcquireString(geometry);
  *pattern=AcquireImage(canvas_info,exception);
  canvas_info=DestroyImageInfo(canvas_info);
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the path with a private draw context so the caller's draw_info
    (and its fill/stroke patterns) is left untouched.
  */
  pattern_draw_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_draw_info->fill_pattern=NewImageList();
  pattern_draw_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  gradient_type=GetImageArtifact(image,key);
  if (gradient_type != (const char *) NULL)
    pattern_draw_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,gradient_type);
  (void) CloneString(&pattern_draw_info->primitive,path);
  status=RenderMVGContent(*pattern,pattern_draw_info,0,exception);
  pattern_draw_info=DestroyDrawInfo(pattern_draw_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyPolygonThreadSet() releases every per-thread PolygonInfo in the
  set and then the set itself.  Always returns NULL so callers can write
  polygon_info=DestroyPolygonThreadSet(polygon_info).

  NOTE(review): the set length is assumed to equal the current thread
  resource limit, i.e. the limit must not shrink between acquire and
  destroy — confirm against AcquirePolygonThreadSet().
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetFillAlpha() computes the coverage of the polygon at integer pixel
  location (x,y).  The fill coverage (0..1) is the return value; the stroke
  coverage is stored through *stroke_alpha.  mid is half the affine-scaled
  stroke width.  As the caller sweeps scanlines top to bottom, edges that
  end above the current scanline are retired via DestroyEdge() and each
  edge's highwater mark is advanced so points already passed are skipped.

  Fix: removed an unreachable "if (distance > 1.0) continue;" — the same
  condition already causes a continue a few lines earlier and distance is
  not modified in between.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are ordered by y; once one starts below the scanline, stop */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge ends above this scanline; presumably retired for good */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first visit on this scanline: cache it and advance highwater */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* projection falls before the segment: distance to q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* projection falls past the segment: distance to q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* partial stroke coverage in the anti-aliasing band */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /* distance is in (0.0,1.0] here */
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies entirely left of x: crossing is certain */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* side-of-line test decides whether the ray crosses this segment */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* non-zero rule */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes a filled and/or stroked polygon,
  polyline, or line described by primitive_info onto the image.  One
  PolygonInfo is built per worker thread (AcquirePolygonThreadSet) so
  scanlines can be processed in parallel; per-pixel coverage comes from
  GetFillAlpha() and is composited over the existing pixels.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* a single coordinate cannot form a polygon; nothing to render */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    {
      /* debugging aid, compiled in but disabled: outline edge bounds */
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid is half the stroke width after applying the affine scale */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  /* union of all edge bounds gives the polygon's bounding box */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by the stroke half-width plus one pixel of anti-aliasing slack */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the bounding box to the image frame */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): coordinates == 1 cannot occur here given the
    coordinates <= 1 early return above; only number_edges == 0 selects
    this branch — confirm.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      /* per-thread polygon_info[id] keeps edge state race-free */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* hard threshold when anti-aliasing is disabled */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LogPrimitiveInfo() writes a trace of the given primitive to the drawing
  event log: one line for point-like primitives (alpha, color, image,
  point, text), otherwise a per-coordinate dump of every subpath marking
  duplicate points and whether each subpath ends open or closed.

  Fix: removed a duplicated "point=primitive_info[i].point;" — the value
  was loaded twice in a row with nothing modifying it in between.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-like primitive: log every coordinate, one subpath at a time.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath: remember its first point in p */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    /* q holds the previous point; equal points are flagged as duplicates */
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath exhausted: closed if the last point returned to the first */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
/*
  DrawPrimitive() renders a single graphic primitive (alpha, color, image,
  point, text, or a traced path) on the image as directed by draw_info.
  Returns MagickTrue if the primitive renders without error.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke cannot render on a grayscale image; promote
    the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace,exception);
  /*
    SVG compliance: install the clip/composite masks for the duration of
    this primitive (removed again at the bottom of this function).
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Round the primitive origin to the nearest integer pixel.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /*
            Set the alpha of the single pixel at (x,y).
          */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Set the alpha of every pixel that fuzzily matches the color at
            (x,y).
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill alpha from (x,y); FillToBorderMethod fills until the
            border color is reached instead of matching the seed color.
          */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict the fill to the alpha channel only */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /*
            Set the alpha of every pixel in the image.
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same method dispatch as AlphaPrimitive, but replaces the whole
        pixel color instead of only the alpha channel.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Read the composite source: an inline "data:" URI or a filename.
      */
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_images=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* only the first image of a multi-frame read is composited */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      /*
        The second point carries the requested size; resize when it differs
        from the source dimensions (0 means "natural size").
      */
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      /*
        Place the composite per the current gravity, then draw it through
        the active affine transform.
      */
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /* clip: a point outside the canvas draws nothing */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Delegate to the annotation engine at the primitive's position.
      */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        All path-like primitives (lines, polygons, ellipses, beziers, ...)
        are rasterized as polygons, with optional dashing and stroking.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          /*
            NOTE(review): plain assignment discards earlier status bits
            accumulated above -- confirm this is intended.
          */
          status=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /*
                NOTE(review): plain assignment discards earlier status bits
                -- confirm this is intended.
              */
              status=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /* draw fill first (stroke disabled), then the stroke outline */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    SVG compliance: remove the per-primitive masks installed above.
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
%
*/
/*
  DrawRoundLinecap() paints a round cap at the endpoint in *primitive_info
  by rendering a vanishingly small 4-vertex polygon there; the polygon
  renderer's round-join handling produces the cap.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    j;

  j=0;
  while (j < 4)
  {
    linecap[j]=(*primitive_info);
    j++;
  }
  linecap[0].coordinates=4;
  /* nudge the vertices apart by a couple of epsilons to form a tiny box */
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
/*
  DrawStrokePolygon() renders the stroke of each subpath in primitive_info
  by tracing a filled outline polygon for the stroke and drawing it with
  the stroke color/pattern; open subpaths get round caps when requested.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke outline is painted as a fill, so clone draw_info and move
    the stroke color/pattern into the fill slots, then disable stroking.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* iterate over subpaths; each advances by its own coordinate count */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /*
      Cap both ends of an open subpath when round caps are requested.
    */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  Reset the matrix to the identity transform: zero every term, then set
  the diagonal scale factors to one.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  GetDrawInfo() initializes draw_info to library defaults, then overrides
  those defaults with relevant settings and options from image_info.
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit settings carried directly on the image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply per-option overrides set on the image info.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept a named weight (e.g. "bold") or a plain numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() evaluates the binomial coefficient C(n,k)=n!/(k!*(n-k)!) in
  floating point: multiply by (k+1)..n, then divide by 1..(n-k).
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    j;

  result=1.0;
  for (j=k+1; j <= n; j++)
    result*=j;
  for (j=1; j <= (n-k); j++)
    result/=j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an MVG arc: an ellipse inscribed in the rectangle
  spanned by the two corner points, traced about the rectangle's center.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    midpoint,
    radii;

  midpoint.x=0.5*(end.x+start.x);
  midpoint.y=0.5*(end.y+start.y);
  radii.x=fabs(midpoint.x-start.x);
  radii.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,radii,degrees));
}
/*
  TraceArcPath() converts an SVG-style elliptical arc (endpoints, radii,
  x-axis rotation angle, large-arc and sweep flags) into a run of cubic
  Bezier segments appended at the current MVG offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: coincident endpoints collapse to a point; a zero
    radius degrades to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Work in the rotated frame; scale the radii up when they are too small
    to span the endpoints (delta > 1).
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Find the ellipse center; the sweep/large_arc flags pick which of the
    two candidate centers applies.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    theta is the swept angle, sign-adjusted to honor the sweep flag.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Approximate the arc with one cubic Bezier per (at most) quarter turn.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma: control-point distance for this segment's angular span */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Emit four Bezier control points, mapped back from the unit-radius
      rotated frame to user space.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final vertex exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    /* TraceBezier may reallocate; refresh p from the MVG buffer */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  /*
    Stamp the whole run with the subpath's primitive type and total
    coordinate count (walking backwards from the last emitted point).
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
  TraceBezier() expands the number_coordinates control points at the
  current MVG offset into a polyline approximation of the Bezier curve,
  evaluated via the Bernstein-polynomial form.  Returns MagickFalse on
  allocation or tracing failure.

  Fix: the original leaked 'coefficients' and 'points' when TracePoint()
  failed in the emission loop; both buffers are now released on every
  early-return path after they are acquired.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sample density by the largest coordinate spread so long
    curves get proportionally more segments.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* Bernstein basis: alpha walks through C(n-1,j)*w^j*(1-w)^(n-1-j) */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        /* release scratch buffers before bailing out (was leaked) */
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      /* release scratch buffers before bailing out (was leaked) */
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
/*
  TraceCircle() renders a circle as an ellipse whose radii both equal the
  distance from the center (start) to the perimeter point (end), swept a
  full 360 degrees.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    distance;

  PointInfo
    degrees,
    radii;

  dx=end.x-start.x;
  dy=end.y-start.y;
  distance=hypot((double) dx,(double) dy);
  radii.x=(double) distance;
  radii.y=(double) distance;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
/*
  TraceEllipse() appends a polygonal approximation of an ellipse (or of
  the elliptical arc spanning arc.x to arc.y degrees) at the current MVG
  offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* a zero radius traces nothing */
  /*
    Pick an angular step fine enough for the larger radius, capped at
    MagickPI/8 for small ellipses.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the end angle is never before the start */
  angle.y=DegreesToRadians(y);
  /*
    Bound the vertex count before reserving room for it.
  */
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if ((coordinates > (double) SSIZE_MAX) ||
      (coordinates > (double) GetMaxMemoryRequest()))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /*
    Emit the exact end-angle vertex so the arc terminates precisely.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Mark the subpath closed when first and last vertices coincide.
  */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() emits the two endpoints of a line into primitive_info.  A
  zero-length line collapses to a single point primitive; otherwise the
  pair is linked as a 2-coordinate open subpath.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string (moveto, lineto, curveto,
  elliptical-arc, and closepath commands) and appends the traced points to
  the primitive array referenced by mvg_info.  It returns the number of
  coordinates generated, or 0 on a parse or allocation failure.  Uppercase
  command letters use absolute coordinates; lowercase are relative to the
  current point.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /* subpath_offset marks where the current subpath began, so its
     coordinate count can be back-filled when the subpath ends. */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    /* last_attribute is needed by the smooth-curve commands (S/s, T/t) to
       decide whether the previous control point can be reflected. */
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-axis-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          /* the primitive buffer may have been reallocated; recompute q */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points and an end point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: implicitly closes the bookkeeping of the previous
          subpath before starting a new one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remembered for the closepath (Z) command */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point and an end point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* no preceding C/c/S/s command: there is no control point to
             reflect, so degrade to the current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* no preceding Q/q/T/t command: nothing to reflect */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath's starting point and finalize
          the subpath's coordinate count.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /*
    Finalize the last (possibly unclosed) subpath, then propagate the
    primitive type backward over every traced coordinate.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;  /* multiple subpaths: fill to border */
  }
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corners[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Trace a rectangle as a closed five-point subpath (four corners plus a
    repeat of the first corner).  A rectangle with zero width or height
    yields no coordinates.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return(MagickTrue);
    }
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(p,corners[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with rounded corners into the
  primitive array referenced by mvg_info.  Each corner is drawn as a
  quarter ellipse (via TraceEllipse), then the subpath is closed back to
  its first point.  Returns MagickFalse on allocation or trace failure.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* remember where this primitive starts so the total coordinate count
     can be back-filled at the end */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit nothing */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* clamp corner radii so opposite arcs cannot overlap */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  /* close the subpath back to the first traced point */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* rewind the offset: the four arcs plus closing point are reported as
     one primitive starting at `offset` */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both endpoints of an open stroke outward by
  `offset` along the stroke direction so a square linecap covers the line
  ends.  Fix: both endpoint adjustments are now guarded by
  `distance >= MagickEpsilon`; previously a degenerate subpath (all
  vertices coincident) divided by zero, producing NaN coordinates, and the
  second scan could fall through with j == -1, reading primitive_info[-1].
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /* find the first vertex that is measurably distinct from the start */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /* push the first vertex outward by `offset` along the segment */
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /* find the last vertex measurably distinct from the end */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /* push the last vertex outward by `offset` along the segment; the
         guard also skips the j == -1 case for degenerate subpaths */
      primitive_info[number_vertices-1].point.x=(double) (
        primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double) (
        primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
/*
  TraceStrokePolygon() converts a traced subpath into a filled polygon that
  outlines the stroke: it offsets the path by half the stroke width on
  each side (path_p and path_q), inserting bevel, miter, or round joins at
  each vertex per draw_info->linejoin.  Returns a newly allocated
  PrimitiveInfo array (caller frees), or NULL on allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Grow path_p/path_q by `pad` entries; on overflow or allocation failure,
  release everything and return NULL from the enclosing function.
*/
#define CheckPathExtent(pad) \
  if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* .p refers to the previous segment, .q to the current one */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* closed path with joins: repeat the second vertex so the closing
         corner gets a join too */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical/near-horizontal segments get a huge finite slope
     (+-1/MagickEpsilon) instead of dividing by ~0 */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* perpendicular offset of the first segment, half a stroke width out */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip segments shorter than half a pixel */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_p[4]/box_q[4]: intersection of the two offset edges (the miter
       point); parallel segments reuse the previous endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* sign of the cross product selects which side is the convex side */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round join with an arc of short segments
             around the vertex */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the cases above, joining on the p side */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* shift: the current segment becomes the previous one */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q in reverse, so the
    two offset sides form one closed outline.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_reduce_build_template.c | //------------------------------------------------------------------------------
// GB_build_template: T=build(S), and assemble any duplicate tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This template is used in GB_builder and the Generated/GB_red_build__*
// workers. This is the same for both vectors and matrices, since this step is
// agnostic about which vectors the entries appear.
{
    // k unused for some uses of this template
    #include "GB_unused.h"

    if (ndupl == 0)
    {

        //----------------------------------------------------------------------
        // no duplicates, just permute S into Tx
        //----------------------------------------------------------------------

        // If no duplicates are present, then GB_builder has already
        // transplanted I_work into T->i, so this step does not need to
        // construct T->i.  The tuple values, in S, are copied or permuted into
        // T->x.

        if (K_work == NULL)
        {
            // K_work == NULL: the tuples are already in order, so each thread
            // does a straight typecasting copy of its slice of S into Tx.
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // thread tid owns tuples [tstart, tend)
                int64_t tstart = tstart_slice [tid] ;
                int64_t tend = tstart_slice [tid+1] ;
                for (int64_t t = tstart ; t < tend ; t++)
                {
                    // Tx [t] = (ttype) S [t] ; with typecast
                    GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, t) ;
                }
            }
        }
        else
        {
            // K_work gives the permutation: tuple t takes its value from
            // S [K_work [t]].
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                int64_t tstart = tstart_slice [tid] ;
                int64_t tend = tstart_slice [tid+1] ;
                for (int64_t t = tstart ; t < tend ; t++)
                {
                    // Tx [t] = (ttype) S [K_work [t]] ; with typecast
                    GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, K_work [t]) ;
                }
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // assemble duplicates
        //----------------------------------------------------------------------

        // Entries in S must be copied into T->x, with any duplicates summed
        // via the operator.  T->i must also be constructed.

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (tid = 0 ; tid < nthreads ; tid++)
        {
            // my_tnz is this thread's write position in Tx/Ti
            int64_t my_tnz = tnz_slice [tid] ;
            int64_t tstart = tstart_slice [tid] ;
            int64_t tend = tstart_slice [tid+1] ;

            // find the first unique tuple owned by this slice
            int64_t t ;
            for (t = tstart ; t < tend ; t++)
            {
                // get the tuple and break if it is not a duplicate
                // (duplicates are marked with a negative I_work entry)
                if (I_work [t] >= 0) break ;
            }

            // scan all tuples and assemble any duplicates
            for ( ; t < tend ; t++)
            {
                // get the t-th tuple, a unique tuple
                int64_t i = I_work [t] ;
                int64_t k = (K_work == NULL) ? t : K_work [t] ;
                ASSERT (i >= 0) ;
                // Tx [my_tnz] = S [k] ; with typecast
                GB_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ;
                Ti [my_tnz] = i ;

                // assemble all duplicates that follow it.  This may assemble
                // the first duplicates in the next slice(s) (up to but not
                // including the first unique tuple in the subsequent slice(s)).
                for ( ; t+1 < nvals && I_work [t+1] < 0 ; t++)
                {
                    // assemble the duplicate tuple
                    int64_t k = (K_work == NULL) ? (t+1) : K_work [t+1] ;
                    // Tx [my_tnz] += S [k] with typecast
                    GB_ADD_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ;
                }
                my_tnz++ ;
            }
        }
    }
}
|
2d.pluto-par.c |
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define tmax TVAL
#define nx NXVAL
#define ny NYVAL
double ex[nx][ny+1];
double ey[nx+1][ny];
double hz[nx][ny];
/* Seed the three field arrays with a deterministic, repeatable pattern so
 * every benchmark repetition starts from identical state.  Element (r,c)
 * of each array is set to ((r + c) % 5) + 1, exactly as before; only the
 * bounds differ per array to match each array's declared extent. */
void init_arrays()
{
  for (int r = 0; r < nx; r++) {
    for (int c = 0; c < ny+1; c++) {
      ex[r][c] = (r + c) % 5 + 1;
    }
  }
  for (int r = 0; r < nx+1; r++) {
    for (int c = 0; c < ny; c++) {
      ey[r][c] = (r + c) % 5 + 1;
    }
  }
  for (int r = 0; r < nx; r++) {
    for (int c = 0; c < ny; c++) {
      hz[r][c] = (r + c) % 5 + 1;
    }
  }
}
double rtclock()
{
struct timezone tzp;
struct timeval tp;
int stat;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main()
{
// Time REPS repetitions of the tiled FDTD-2D kernel and print the mean
// per-repetition wall-clock time in seconds.
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
int t, i, j, k, l, m, n, ii, jj;
// S1..S4 are the four FDTD-2D update statements (Ey boundary row, Ey, Ex,
// Hz), parameterized on the tile coordinates produced by the Pluto
// transformation; the extra leading arguments are tile indices only.
#define S1(zT0,zT1,t,j) {ey[0][j]=t;}
#define S2(zT0,zT1,zT2,t,i,j) {ey[i][j]=ey[i][j]-((double)(1))/2*(hz[i][j]-hz[i-1][j]);}
#define S3(zT0,zT1,zT2,t,i,j) {ex[i][j]=ex[i][j]-((double)(1))/2*(hz[i][j]-hz[i][j-1]);}
#define S4(zT0,zT1,zT2,t,i,j) {hz[i][j]=hz[i][j]-((double)(7))/10*(ey[1+i][j]+ex[i][1+j]-ex[i][j]-ey[i][j]);}
int c1, c2, c3, c4, c5, c6, c7;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
// Pluto-generated time-skewed, 32x32-tiled loop nest: c1 enumerates tile
// wavefronts; the c2 tiles on a wavefront are independent of each other,
// which is what makes the parallel for below legal.  The bound expressions
// are machine-generated; do not hand-simplify them.
for (c1=-1;c1<=floord(2*tmax+ny-2,32);c1++) {
lb1=max(max(ceild(32*c1-tmax+1,32),ceild(32*c1-31,64)),0);
ub1=min(min(floord(32*c1+ny+31,64),floord(tmax+ny-1,32)),floord(32*c1+31,32));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(max(max(ceild(32*c2-ny-30,32),0),ceild(64*c1-96*c2-61,32)),ceild(32*c1-32*c2-31,32)),ceild(32*c1-1024*c2-1891,992));c3<=min(min(floord(32*c2+nx+30,32),floord(tmax+nx-1,32)),floord(32*c1-32*c2+nx+31,32));c3++) {
if ((c1 <= floord(32*c2+32*c3-nx,32)) && (c2 <= floord(32*c3-nx+ny,32)) && (c3 >= ceild(nx,32))) {
for (c5=max(32*c2,32*c3-nx+1);c5<=min(32*c3-nx+ny,32*c2+31);c5++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,32*c3-nx,nx-1,-32*c3+c5+nx-1) ;
}
}
if ((c1 <= floord(64*c2-ny,32)) && (c2 >= max(ceild(32*c3-nx+ny+1,32),ceild(ny,32)))) {
for (c6=max(32*c3,32*c2-ny+1);c6<=min(32*c2+nx-ny,32*c3+31);c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,32*c2-ny,-32*c2+c6+ny-1,ny-1) ;
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c2-ny+1,0),32*c3);c4<=min(min(32*c3+30,32*c2-ny+31),tmax-1);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
for (c6=c4+1;c6<=32*c3+31;c6++) {
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c3,0),32*c2-ny+32);c4<=min(min(32*c3+30,tmax-1),32*c2-1);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c2,32*c3),0);c4<=min(min(32*c2+30,32*c3+30),tmax-1);c4++) {
S1(c1-c2,-c1+2*c2,c4,0) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
}
for (c4=max(max(max(32*c1-32*c2,0),32*c2-ny+1),32*c3-nx+1);c4<=min(min(min(32*c3-nx+31,32*c1-32*c2+31),tmax-1),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
for (c6=32*c3;c6<=c4+nx;c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
for (c4=max(max(max(0,32*c3-nx+1),32*c1-32*c2),32*c2-ny+32);c4<=min(min(min(tmax-1,32*c1-32*c2+31),32*c2-1),32*c3-nx+31);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
}
for (c4=max(max(max(32*c3-nx+32,32*c1-32*c2),0),32*c2-ny+1);c4<=min(min(min(32*c3-1,32*c1-32*c2+31),tmax-1),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
for (c6=32*c3;c6<=32*c3+31;c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
for (c4=max(max(max(32*c2,0),32*c3-nx+1),32*c1-32*c2);c4<=min(min(min(tmax-1,32*c1-32*c2+31),32*c2+30),32*c3-nx+31);c4++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
}
for (c4=max(max(max(0,32*c1-32*c2),32*c3-nx+32),32*c2-ny+32);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2-1);c4++) {
/*@ begin Loop(
transform Composite(
tile = [('c5',T1,'ii'),('c6',T2,'jj')],
permut = [PERMUTS],
unrolljam = [('c5',U1),('c6',U2)],
vector = (VEC, ['ivdep','vector always'])
)
for (c5=32*c2;c5<=32*c2+31;c5++)
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
) @*/{
// Orio-generated expansion of the annotated loop above: 8x8
// unroll-and-jam over (c5,c6), followed by remainder loops for the
// trailing iterations of c6 and then of c5.
for (c5=32*c2; c5<=32*c2+24; c5=c5+8) {
for (c6=32*c3; c6<=32*c3+24; c6=c6+8) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+7);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5+7);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5+6);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5+6);
}
for (; c6<=32*c3+31; c6=c6+1) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+1);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+2);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+3);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+4);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+6);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+7);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+1);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+2);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+3);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+4);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+6);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5+7);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+2);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+3);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+4);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5+6);
}
}
for (; c5<=32*c2+31; c5=c5+1) {
for (c6=32*c3; c6<=32*c3+24; c6=c6+8) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5);
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+7,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+1,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+2,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+3,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+4,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+5,-c4+c5-1);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6+6,-c4+c5-1);
}
for (; c6<=32*c3+31; c6=c6+1) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5);
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1);
}
}
}
/*@ end @*/
}
for (c4=max(max(max(32*c2,32*c3-nx+32),0),32*c1-32*c2);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2+30);c4++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
// Boundary/corner cases of the tiled iteration space follow.
if ((-c1 == -c2-c3) && (c1 <= min(floord(64*c3-1,32),floord(32*c3+tmax-32,32)))) {
S1(c3,c1-2*c3,32*c1-32*c3+31,0) ;
for (c6=32*c1-32*c3+32;c6<=32*c3+31;c6++) {
S2(c3,0,c1-2*c3,32*c1-32*c3+31,-32*c1+32*c3+c6-31,0) ;
}
}
if ((-c1 == -c2-c3) && (c1 >= ceild(64*c2-31,32)) && (c1 <= min(floord(32*c2+tmax-32,32),floord(64*c2-1,32)))) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,0) ;
for (c5=32*c1-32*c2+32;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,-32*c1+32*c2+c5-31) ;
S3(c1-c2,0,-c1+2*c2,32*c1-32*c2+31,0,-32*c1+32*c2+c5-31) ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),2*c2-1))) {
for (c5=32*c2;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,-32*c1+32*c2+c5-31) ;
S3(c1-c2,0,-c1+2*c2,32*c1-32*c2+31,0,-32*c1+32*c2+c5-31) ;
}
}
if ((-c1 == -2*c2) && (-c1 == -2*c3) && (c1 <= floord(tmax-32,16))) {
if (c1%2 == 0) {
S1(c1/2,0,16*c1+31,0) ;
}
}
if ((c1 >= 2*c2) && (c2 <= min(c3-1,floord(tmax-32,32)))) {
for (c6=32*c3;c6<=min(32*c3+31,32*c2+nx+30);c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,32*c2+31,-32*c2+c6-31,0) ;
}
}
}
}
}
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
// Report the mean per-repetition wall-clock time.
annot_t_total = annot_t_total / REPS;
printf("%f\n", annot_t_total);
// NOTE(review): returns 1, i.e. a nonzero (failure) exit status --
// presumably intentional for the tuning harness; confirm before reuse.
return 1;
}
|
libomp_interface.h | // This file does not contain any code; it just contains additional text and formatting
// for doxygen.
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
/*! @mainpage LLVM OpenMP* Runtime Library Interface
@section sec_intro Introduction
This document describes the interface provided by the
LLVM OpenMP\other runtime library to the compiler.
Routines that are directly called as simple functions by user code are
not currently described here, since their definition is in the OpenMP
specification available from http://openmp.org
The aim here is to explain the interface from the compiler to the runtime.
The overall design is described, and each function in the interface
has its own description. (At least, that's the ambition, we may not be there yet).
@section sec_building Quickly Building the Runtime
For the impatient, we cover building the runtime as the first topic here.
CMake is used to build the OpenMP runtime. For details and a full list of options for the CMake build system,
see <tt>README.rst</tt> in the source code repository. These instructions will provide the most typical build.
In-LLVM-tree build:
@code
$ cd where-you-want-to-live
Check out openmp into llvm/projects
$ cd where-you-want-to-build
$ mkdir build && cd build
$ cmake path/to/llvm -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler>
$ make omp
@endcode
Out-of-LLVM-tree build:
@code
$ cd where-you-want-to-live
Check out openmp
$ cd where-you-want-to-live/openmp
$ mkdir build && cd build
$ cmake path/to/openmp -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler>
$ make
@endcode
@section sec_supported Supported RTL Build Configurations
The architectures supported are IA-32 architecture, Intel® 64, and
Intel® Many Integrated Core Architecture. The build configurations
supported are shown in the table below.
<table border=1>
<tr><th> <th>icc/icl<th>gcc<th>clang
<tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7)
<tr><td>FreeBSD\other<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7,8)
<tr><td>OS X\other<td>Yes(1,3,4)<td>No<td>Yes(4,6,7)
<tr><td>Windows\other OS<td>Yes(1,4)<td>No<td>No
</table>
(1) On IA-32 architecture and Intel® 64, icc/icl versions 12.x
are supported (12.1 is recommended).<br>
(2) gcc version 4.7 is supported.<br>
(3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br>
(4) Intel® Many Integrated Core Architecture not supported.<br>
(5) On Intel® Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.<br>
(6) Clang\other version 3.3 is supported.<br>
(7) Clang\other currently does not offer a software-implemented 128 bit extended
precision type. Thus, all entry points reliant on this type are removed
from the library and cannot be called in the user program. The following
functions are not available:
@code
__kmpc_atomic_cmplx16_*
__kmpc_atomic_float16_*
__kmpc_atomic_*_fp
@endcode
(8) Community contribution provided AS IS, not tested by Intel.
Supported Architectures: IBM(R) Power 7 and Power 8
<table border=1>
<tr><th> <th>gcc<th>clang
<tr><td>Linux\other OS<td>Yes(1,2)<td>Yes(3,4)
</table>
(1) On Power 7, gcc version 4.8.2 is supported.<br>
(2) On Power 8, gcc version 4.8.2 is supported.<br>
(3) On Power 7, clang version 3.7 is supported.<br>
(4) On Power 8, clang version 3.7 is supported.<br>
@section sec_frontend Front-end Compilers that work with this RTL
The following compilers are known to do compatible code generation for
this RTL: icc/icl, gcc. Code generation is discussed in more detail
later in this document.
@section sec_outlining Outlining
The runtime interface is based on the idea that the compiler
"outlines" sections of code that are to run in parallel into separate
functions that can then be invoked in multiple threads. For instance,
simple code like this
@code
void foo()
{
#pragma omp parallel
{
... do something ...
}
}
@endcode
is converted into something that looks conceptually like this (where
the names used are merely illustrative; the real library function
names will be used later after we've discussed some more issues...)
@code
static void outlinedFooBody()
{
... do something ...
}
void foo()
{
__OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name!
}
@endcode
@subsection SEC_SHAREDVARS Addressing shared variables
In real uses of the OpenMP\other API there are normally references
from the outlined code to shared variables that are in scope in the containing function.
Therefore the containing function must be able to address
these variables. The runtime supports two alternate ways of doing
this.
@subsubsection SEC_SEC_OT Current Technique
The technique currently supported by the runtime library is to receive
a separate pointer to each shared variable that can be accessed from
the outlined function. This is what is shown in the example below.
We hope soon to provide an alternative interface to support the
alternate implementation described in the next section. The
alternative implementation has performance advantages for small
parallel regions that have many shared variables.
@subsubsection SEC_SEC_PT Future Technique
The idea is to treat the outlined function as though it
were a lexically nested function, and pass it a single argument which
is the pointer to the parent's stack frame. Provided that the compiler
knows the layout of the parent frame when it is generating the outlined
function it can then access the up-level variables at appropriate
offsets from the parent frame. This is a classical compiler technique
from the 1960s to support languages like Algol (and its descendants)
that support lexically nested functions.
The main benefit of this technique is that there is no code required
at the fork point to marshal the arguments to the outlined function.
Since the runtime knows statically how many arguments must be passed to the
outlined function, it can easily copy them to the thread's stack
frame. Therefore the performance of the fork code is independent of
the number of shared variables that are accessed by the outlined
function.
If it is hard to determine the stack layout of the parent while generating the
outlined code, it is still possible to use this approach by collecting all of
the variables in the parent that are accessed from outlined functions into
a single `struct` which is placed on the stack, and whose address is passed
to the outlined functions. In this way the offsets of the shared variables
are known (since they are inside the struct) without needing to know
the complete layout of the parent stack-frame. From the point of view
of the runtime either of these techniques is equivalent, since in either
case it only has to pass a single argument to the outlined function to allow
it to access shared variables.
A scheme like this is how gcc\other generates outlined functions.
@section SEC_INTERFACES Library Interfaces
The library functions used for specific parts of the OpenMP\other language implementation
are documented in different modules.
- @ref BASIC_TYPES fundamental types used by the runtime in many places
- @ref DEPRECATED functions that are in the library but are no longer required
- @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime
- @ref PARALLEL functions for implementing `omp parallel`
- @ref THREAD_STATES functions for supporting thread state inquiries
- @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections`
- @ref THREADPRIVATE functions to support thread private data, copyin etc
- @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc
- @ref ATOMIC_OPS functions to support atomic operations
- @ref STATS_GATHERING macros to support developer profiling of libomp
- Documentation on tasking has still to be written...
@section SEC_EXAMPLES Examples
@subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example
This example shows the code generated for a parallel for with reduction and dynamic scheduling.
@code
extern float foo( void );
int main () {
int i;
float r = 0.0;
#pragma omp parallel for schedule(dynamic) reduction(+:r)
for ( i = 0; i < 10; i ++ ) {
r += foo();
}
}
@endcode
The transformed code looks like this.
@code
extern float foo( void );
int main () {
static int zero = 0;
auto int gtid;
auto float r = 0.0;
__kmpc_begin( & loc3, 0 );
// The gtid is not actually required in this example so could be omitted;
// We show its initialization here because it is often required for calls into
// the runtime and should be locally cached like this.
gtid = __kmpc_global_thread_num( & loc3 );
__kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
__kmpc_end( & loc0 );
return 0;
}
struct main_10_reduction_t_5 { float r_10_rpr; };
static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
// if compiler has generated an atomic reduction.
void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp ) {
auto int i_7_pr;
auto int lower, upper, liter, incr;
auto struct main_10_reduction_t_5 reduce;
reduce.r_10_rpr = 0.F;
liter = 0;
__kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) ) {
for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
reduce.r_10_rpr += foo();
}
switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) ) {
case 1:
*r_7_shp += reduce.r_10_rpr;
__kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
break;
case 2:
__kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
break;
default:;
}
}
void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs,
struct main_10_reduction_t_5 *reduce_rhs )
{
reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode
@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.
@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.
@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.
@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.
@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.
@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types
which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same,
so they are only described once.
Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed,
since the iterations to be executed by any give thread can be determined as soon as the loop parameters are known.
Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions.
The init function is called once in each thread outside the loop, while the next function is called each
time that the previous chunk of work has been exhausted.
@defgroup SYNCHRONIZATION Synchronization
These functions are used for implementing barriers.
@defgroup THREADPRIVATE Thread private data support
These functions support copyin/out and thread private data.
@defgroup STATS_GATHERING Statistics Gathering from OMPTB
These macros support profiling the libomp library. Use --stats=on when building with build.pl to enable
and then use the KMP_* macros to profile (through counts or clock ticks) libomp during execution of an OpenMP program.
@section sec_stats_env_vars Environment Variables
This section describes the environment variables relevant to stats-gathering in libomp
@code
KMP_STATS_FILE
@endcode
This environment variable is set to an output filename that will be appended *NOT OVERWRITTEN* if it exists. If this environment variable is undefined, the statistics will be output to stderr
@code
KMP_STATS_THREADS
@endcode
This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics.
@defgroup TASKING Tasking support
These functions support tasking constructs.
@defgroup USER User visible functions
These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces.
*/
|
parallelForConstruct.c | int main() {
/* Small OpenMP parallel-for example: 'x' is the loop variable and 'a' is
   explicitly privatized for the region. */
int x = 10;
int a;
/* NOTE(review): each thread's private(a) copy starts uninitialized, so the
   'a -= 10' below reads an indeterminate value (undefined behavior).
   Presumably this file is a test input for an analysis tool and the race /
   uninitialized read is intentional - confirm before changing; otherwise
   use firstprivate(a) or initialize 'a' before the region. */
#pragma omp parallel for private(a)
for(x = 0; x < 10; x++)
{
a -= 10;
}
/* The loop variable of a parallel for is private inside the construct;
   this assignment gives x a well-defined value afterwards. */
x = 20;
}
|
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
  /* The ID is the same for every OpenMP vector, so 'v' is unused. */
  return SUNDIALS_NVEC_OPENMP;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads, SUNContext sunctx)
{
  N_Vector v;
  N_VectorContent_OpenMP content;

  /* Create vector */
  v = NULL;
  v = N_VNewEmpty(sunctx);
  if (v == NULL) return(NULL);

  /* Attach operations */

  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid = N_VGetVectorID_OpenMP;
  v->ops->nvclone = N_VClone_OpenMP;
  v->ops->nvcloneempty = N_VCloneEmpty_OpenMP;
  v->ops->nvdestroy = N_VDestroy_OpenMP;
  v->ops->nvspace = N_VSpace_OpenMP;
  v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
  v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
  v->ops->nvgetlength = N_VGetLength_OpenMP;

  /* standard vector operations */
  v->ops->nvlinearsum = N_VLinearSum_OpenMP;
  v->ops->nvconst = N_VConst_OpenMP;
  v->ops->nvprod = N_VProd_OpenMP;
  v->ops->nvdiv = N_VDiv_OpenMP;
  v->ops->nvscale = N_VScale_OpenMP;
  v->ops->nvabs = N_VAbs_OpenMP;
  v->ops->nvinv = N_VInv_OpenMP;
  v->ops->nvaddconst = N_VAddConst_OpenMP;
  v->ops->nvdotprod = N_VDotProd_OpenMP;
  v->ops->nvmaxnorm = N_VMaxNorm_OpenMP;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
  v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
  v->ops->nvmin = N_VMin_OpenMP;
  v->ops->nvwl2norm = N_VWL2Norm_OpenMP;
  v->ops->nvl1norm = N_VL1Norm_OpenMP;
  v->ops->nvcompare = N_VCompare_OpenMP;
  v->ops->nvinvtest = N_VInvTest_OpenMP;
  v->ops->nvconstrmask = N_VConstrMask_OpenMP;
  v->ops->nvminquotient = N_VMinQuotient_OpenMP;

  /* fused and vector array operations are disabled (NULL) by default */

  /* local reduction kernels */
  /* This vector is not distributed, so the "local" reductions are the same
     functions as the global ones. */
  v->ops->nvdotprodlocal = N_VDotProd_OpenMP;
  v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMP;
  v->ops->nvminlocal = N_VMin_OpenMP;
  v->ops->nvl1normlocal = N_VL1Norm_OpenMP;
  v->ops->nvinvtestlocal = N_VInvTest_OpenMP;
  v->ops->nvconstrmasklocal = N_VConstrMask_OpenMP;
  v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
  v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMP;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;

  /* single buffer reduction operations */
  v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMP;

  /* XBraid interface operations */
  v->ops->nvbufsize = N_VBufSize_OpenMP;
  v->ops->nvbufpack = N_VBufPack_OpenMP;
  v->ops->nvbufunpack = N_VBufUnpack_OpenMP;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof *content);
  /* On allocation failure, destroy the half-built vector to avoid a leak. */
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content */
  /* No data array yet; own_data is SUNFALSE so N_VDestroy won't free(NULL)
     data it does not own. */
  content->length = length;
  content->num_threads = num_threads;
  content->own_data = SUNFALSE;
  content->data = NULL;

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads, SUNContext sunctx)
{
  N_Vector vec;
  realtype *buf;

  /* Start from an empty vector with all operations attached. */
  vec = N_VNewEmpty_OpenMP(length, num_threads, sunctx);
  if (vec == NULL) return(NULL);

  /* Allocate owned storage only for non-empty vectors. */
  if (length > 0) {
    buf = (realtype *) malloc(length * sizeof(realtype));
    if (buf == NULL) { N_VDestroy_OpenMP(vec); return(NULL); }

    NV_OWN_DATA_OMP(vec) = SUNTRUE;
    NV_DATA_OMP(vec)     = buf;
  }

  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads, SUNContext sunctx)
{
  N_Vector vec = N_VNewEmpty_OpenMP(length, num_threads, sunctx);

  if (vec == NULL) return(NULL);

  if (length > 0) {
    /* The caller retains ownership of v_data; the vector only borrows it. */
    NV_OWN_DATA_OMP(vec) = SUNFALSE;
    NV_DATA_OMP(vec)     = v_data;
  }

  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
  /* Delegates to the generic array-clone helper. */
  return(N_VCloneVectorArray(count, w));
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
  /* Delegates to the generic helper; the clones have no data arrays. */
  return(N_VCloneEmptyVectorArray(count, w));
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count)
{
  /* Delegates to the generic destroy helper. */
  N_VDestroyVectorArray(vs, count);
  return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
  /* Number of elements stored in the vector's content. */
  return NV_LENGTH_OMP(v);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMP(N_Vector x)
{
  /* Convenience wrapper: print to stdout via the FILE* variant. */
  N_VPrintFile_OpenMP(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
  sunindextype idx;
  const sunindextype len = NV_LENGTH_OMP(x);
  realtype *vals = NV_DATA_OMP(x);

  /* One element per line; format width matches the build-time realtype. */
  for (idx = 0; idx < len; idx++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%11.8Lg\n", vals[idx]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%11.8g\n", vals[idx]);
#else
    fprintf(outfile, "%11.8g\n", vals[idx]);
#endif
  }

  /* Trailing blank line separates consecutive vector dumps. */
  fprintf(outfile, "\n");
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
  N_Vector v;
  N_VectorContent_OpenMP content;

  if (w == NULL) return(NULL);

  /* Create vector */
  v = NULL;
  v = N_VNewEmpty(w->sunctx);
  if (v == NULL) return(NULL);

  /* Attach operations */
  /* N_VCopyOps returns nonzero on failure; destroy the half-built vector
     so nothing leaks. */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content */
  /* Length and thread count copied from w; no data array is attached and
     own_data is SUNFALSE, so destruction won't free anything here. */
  content->length = NV_LENGTH_OMP(w);
  content->num_threads = NV_NUM_THREADS_OMP(w);
  content->own_data = SUNFALSE;
  content->data = NULL;

  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMP(N_Vector w)
{
  N_Vector clone;
  realtype *buf;
  sunindextype len;

  /* Clone the structure and operations, then attach fresh storage. */
  clone = N_VCloneEmpty_OpenMP(w);
  if (clone == NULL) return(NULL);

  len = NV_LENGTH_OMP(w);

  if (len > 0) {
    buf = (realtype *) malloc(len * sizeof(realtype));
    if (buf == NULL) { N_VDestroy_OpenMP(clone); return(NULL); }

    NV_OWN_DATA_OMP(clone) = SUNTRUE;
    NV_DATA_OMP(clone)     = buf;
  }

  return(clone);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMP(N_Vector v)
{
  if (v == NULL) return;

  /* free content */
  if (v->content != NULL) {
    /* free data array if it's owned by the vector */
    if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) {
      free(NV_DATA_OMP(v));
      NV_DATA_OMP(v) = NULL;
    }
    free(v->content);
    v->content = NULL;
  }

  /* free ops and vector */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  /* Nulling the local parameter does not affect the caller's pointer;
     callers must not use v after this call. */
  free(v); v = NULL;

  return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* lrw: realtype words used (the data array);
     liw: integer words used (the stored length). */
  *lrw = NV_LENGTH_OMP(v);
  *liw = 1;

  return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
  /* Raw (non-owning) pointer to the vector's data array. */
  return((realtype *) NV_DATA_OMP(v));
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
  /* Only meaningful for non-empty vectors; zero-length vectors keep NULL. */
  if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;

  return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd, *yd, *zd;
  N_Vector v1, v2;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  xd = yd = zd = NULL;

  /* Special coefficient/aliasing patterns are dispatched to cheaper private
     kernels; only the fully general a*x+b*y falls through to the loop. */

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMP(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMP(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMP(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  /* 'test' is assigned inside the condition; it records which sign pattern
     matched so the subtraction operands are ordered correctly. */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMP(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMP(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMP(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMP(a, x, y, z);
    return;
  }

  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMP(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = (a*xd[i])+(b*yd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMP(realtype c, N_Vector z)
{
  sunindextype idx, len;
  realtype *out;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(z);
  out = NV_DATA_OMP(z);

  /* Fill every entry of z with the constant c. */
#pragma omp parallel for default(none) private(idx) shared(len,c,out) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(z))
  for (idx = 0; idx < len; idx++)
    out[idx] = c;

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype idx, len;
  realtype *xa, *ya, *za;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

  /* Elementwise product: z = x .* y */
#pragma omp parallel for default(none) private(idx) shared(len,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    za[idx] = xa[idx] * ya[idx];

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype idx, len;
  realtype *xa, *ya, *za;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

  /* Elementwise quotient: z = x ./ y (no zero-divisor check, as in the
     rest of this module). */
#pragma omp parallel for default(none) private(idx) shared(len,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    za[idx] = xa[idx] / ya[idx];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  if (z == x) {  /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMP(c, x);
    return;
  }

  /* c == 1 and c == -1 dispatch to cheaper copy/negate kernels. */
  if (c == ONE) {
    VCopy_OpenMP(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMP(x, z);
  } else {
    N = NV_LENGTH_OMP(x);
    xd = NV_DATA_OMP(x);
    zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      zd[i] = c*xd[i];
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
/* Compute componentwise absolute value z[i] = SUNRabs(x[i]). */
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* Use default(none) with explicit data-sharing clauses to match every
     other kernel in this file (the original pragma omitted them here). */
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = SUNRabs(xd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype idx, len;
  realtype *xa, *za;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  za  = NV_DATA_OMP(z);

  /* Elementwise reciprocal; zero entries are the caller's responsibility
     (see N_VInvTest for the checked variant). */
#pragma omp parallel for default(none) private(idx) shared(len,xa,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    za[idx] = ONE / xa[idx];

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  sunindextype idx, len;
  realtype *xa, *za;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  za  = NV_DATA_OMP(z);

  /* Shift every component by the scalar b. */
#pragma omp parallel for default(none) private(idx) shared(len,b,xa,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    za[idx] = xa[idx] + b;

  return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  sunindextype idx, len;
  realtype dot, *xa, *ya;

  idx = 0; /* initialize to suppress clang warning */
  dot = ZERO;
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);

  /* Parallel reduction: dot = sum_i x[i]*y[i]. */
#pragma omp parallel for default(none) private(idx) shared(len,xa,ya) \
  reduction(+:dot) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    dot += xa[idx] * ya[idx];

  return(dot);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype tmax, max, *xd;

  i = 0; /* initialize to suppress clang warning */
  max = ZERO;
  xd = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    /* Each thread computes a private maximum over its chunk, then merges
       it into the shared result inside a critical section. */
    tmax = ZERO;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
    }
#pragma omp critical
    {
      if (tmax > max)
        max = tmax;
    }
  }
  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  /* sqrt( (1/N) * sum_i (x[i]*w[i])^2 ) */
  return(SUNRsqrt(N_VWSqrSumLocal_OpenMP(x, w)/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  /* Same as N_VWrmsNorm but only entries with id[i] > 0 contribute to the
     sum; the divisor is still the full vector length N. */
  return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMP(x, w, id)/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
realtype N_VMin_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd;
  realtype tmin;

  i = 0; /* initialize to suppress clang warning */
  xd = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  min = xd[0];

#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    /* Per-thread minimum over this thread's chunk; element 0 may be
       visited by one thread twice (harmless for a minimum). */
    tmin = xd[0];
#pragma omp for schedule(static)
    for (i = 1; i < N; i++) {
      if (xd[i] < tmin) tmin = xd[i];
    }
    /* NOTE(review): this outer check reads shared 'min' without
       synchronization (double-checked pattern to skip the critical
       section); strictly a data race under the OpenMP memory model even
       though the critical section re-checks - confirm intent. */
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }
  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype idx, len;
  realtype acc, *xa, *wa;

  idx = 0; /* initialize to suppress clang warning */
  acc = ZERO;
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  wa  = NV_DATA_OMP(w);

  /* Weighted L2 norm: sqrt( sum_i (x[i]*w[i])^2 ). */
#pragma omp parallel for default(none) private(idx) shared(len,xa,wa) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    acc += SUNSQR(xa[idx] * wa[idx]);

  return(SUNRsqrt(acc));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMP(N_Vector x)
{
  sunindextype idx, len;
  realtype acc, *xa;

  idx = 0; /* initialize to suppress clang warning */
  acc = ZERO;
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);

  /* L1 norm: sum of absolute values. */
#pragma omp parallel for default(none) private(idx) shared(len,xa) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    acc += SUNRabs(xa[idx]);

  return(acc);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  sunindextype idx, len;
  realtype *xa, *za;

  idx = 0; /* initialize to suppress clang warning */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  za  = NV_DATA_OMP(z);

  /* Threshold test: z[i] = 1 if |x[i]| >= c, else 0. */
#pragma omp parallel for default(none) private(idx) shared(len,c,xa,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    za[idx] = (SUNRabs(xa[idx]) >= c) ? ONE : ZERO;

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* Compute componentwise inverse z[i] = ONE/x[i], skipping zero entries.
   Returns SUNTRUE if no zero entry was encountered, SUNFALSE otherwise. */
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd, val;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* 'val' flags whether any zero entry was found.  A max-reduction gives
     each thread a private copy; the original wrote to a shared 'val' from
     every thread concurrently, which is a data race. */
  val = ZERO;
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) \
  reduction(max:val) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (xd[i] == ZERO)
      val = ONE;
    else
      zd[i] = ONE/xd[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Compute the constraint mask m for vector x against constraints c:
   m[i] = 1 where a set constraint is violated, 0 otherwise.
   Returns SUNFALSE if any constraint was violated, SUNTRUE otherwise.
   Constraint encoding: |c[i]| > 1.5 means strict sign constraint
   (x[i]*c[i] must be > 0), |c[i]| > 0.5 means non-strict (>= 0). */
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd, *xd, *md;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  cd = xd = md = NULL;

  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  cd = NV_DATA_OMP(c);
  md = NV_DATA_OMP(m);

  /* 'temp' becomes ONE if any violation occurs.  A max-reduction replaces
     the original unsynchronized writes to a shared 'temp' (the code even
     carried a "Here is a race" comment); md[i] writes are per-index and
     therefore race-free. */
  temp = ZERO;
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md) \
  reduction(max:temp) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    md[i] = ZERO;

    /* Continue if no constraints were set for the variable */
    if (cd[i] == ZERO)
      continue;

    /* Check if a set constraint has been violated */
    test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
           (SUNRabs(cd[i]) > HALF  && xd[i]*cd[i] < ZERO);
    if (test) {
      temp = md[i] = ONE;
    }
  }

  /* Return false if any constraint was violated */
  return (temp == ONE) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd, *dd, min, tmin, val;

  i = 0; /* initialize to suppress clang warning */
  nd = dd = NULL;

  N = NV_LENGTH_OMP(num);
  nd = NV_DATA_OMP(num);
  dd = NV_DATA_OMP(denom);
  /* If every denominator entry is zero, BIG_REAL is returned unchanged. */
  min = BIG_REAL;

#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
  num_threads(NV_NUM_THREADS_OMP(num))
  {
    /* Per-thread minimum over entries with nonzero denominator. */
    tmin = BIG_REAL;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (dd[i] != ZERO) {
        val = nd[i]/dd[i];
        if (val < tmin) tmin = val;
      }
    }
    /* NOTE(review): unsynchronized read of shared 'min' before the
       critical section (double-checked pattern, same as N_VMin) -
       technically a data race; the critical section re-checks. */
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }
  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype idx, len;
  realtype acc, *xa, *wa;

  idx = 0; /* initialize to suppress clang warning */
  acc = ZERO;
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  wa  = NV_DATA_OMP(w);

  /* Weighted square sum: sum_i (x[i]*w[i])^2. */
#pragma omp parallel for default(none) private(idx) shared(len,xa,wa) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++)
    acc += SUNSQR(xa[idx] * wa[idx]);

  return(acc);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype idx, len;
  realtype acc, *xa, *wa, *mask;

  idx = 0; /* initialize to suppress clang warning */
  acc = ZERO;
  len  = NV_LENGTH_OMP(x);
  xa   = NV_DATA_OMP(x);
  wa   = NV_DATA_OMP(w);
  mask = NV_DATA_OMP(id);

  /* Masked weighted square sum: only entries with id[i] > 0 contribute. */
#pragma omp parallel for default(none) private(idx) shared(len,xa,wa,mask) \
  reduction(+:acc) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (idx = 0; idx < len; idx++) {
    if (mask[idx] > ZERO)
      acc += SUNSQR(xa[idx] * wa[idx]);
  }

  return(acc);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i;
  sunindextype j, N;
  realtype* zd=NULL;
  realtype* xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);

  /* Three aliasing cases below; each uses ONE parallel region in which the
     same thread team executes a sequence of '#pragma omp for' worksharing
     loops (one per input vector), with an implicit barrier after each. */

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      /* scale the output in place first, then accumulate the rest */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] *= c[0];
      }
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
  {
    xd = NV_DATA_OMP(X[0]);
#pragma omp for schedule(static)
    for (j=0; j<N; j++) {
      zd[j] = c[0] * xd[j];
    }
    for (i=1; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] += c[i] * xd[j];
      }
    }
  }
  return(0);
}
int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  /* In-place case (pointer comparison of the arrays): accumulate into Y. */
  if (Y == Z) {
#pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
    {
      for (i=0; i<nvec; i++) {
        yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          yd[j] += a[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a[i] * xd[j] + yd[j];
      }
    }
  }
  return(0);
}
int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* xd=NULL;
  realtype* yd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMP(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* compute multiple dot products */
  /* 'sum' is private: each thread accumulates its partial dot product over
     its worksharing chunk, then adds it into dotprods[i] in a critical
     section (a manual reduction across the team). */
#pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd = NV_DATA_OMP(Y[i]);
      sum = ZERO;
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += xd[j] * yd[j];
      }
#pragma omp critical
      {
        dotprods[i] += sum;
      }
    }
  }
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
int N_VLinearSumVectorArray_OpenMP(int nvec,
                                   realtype a, N_Vector* X,
                                   realtype b, N_Vector* Y,
                                   N_Vector* Z)
{
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;
  realtype c;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* Mirror of the scalar N_VLinearSum dispatch, applied to vector arrays:
     special coefficient/aliasing patterns go to cheaper private kernels. */

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMP(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMP(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMP(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  /* 'test' is assigned inside the condition; it records which pattern
     matched so the subtraction operands are ordered correctly. */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /*   (1) a == -1.0, b != 1.0, */
  /*   (2) a != 1.0, b == -1.0  */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Do all cases not handled above: */
  /*   (1) a == other, b == 0.0 - user should have called N_VScale */
  /*   (2) a == 0.0, b == other - user should have called N_VScale */
  /*   (3) a,b == other, a !=b, a != -b */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /* compute linear sum for each vector pair in vector arrays */
  /* One parallel region; the inner worksharing loop runs nvec times with
     the same thread team (implicit barrier after each 'omp for'). */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a * xd[j] + b * yd[j];
      }
    }
  }

  return(0);
}
/* Scales a vector array: Z[i] = c[i] * X[i] for i = 0,...,nvec-1.
   Falls back to N_VScale for nvec == 1 and uses an in-place loop when
   X and Z alias the same array. Returns 0 on success, -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMP(c[0], X[0], Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/*
 * X[i] *= c[i]  (in place, X aliases Z)
 */
if (X == Z) {
/* every thread executes the i loop; only the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
xd[j] *= c[i];
}
}
}
return(0);
}
/*
 * Z[i] = c[i] * X[i]
 */
#pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c[i] * xd[j];
}
}
}
return(0);
}
/* Sets every component of each vector in Z to the constant c
   (Z[i][j] = c). Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VConst */
if (nvec == 1) {
N_VConst_OpenMP(c, Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/* set each vector in the vector array to a constant;
   all threads walk the i loop while the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,zd) shared(nvec,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c;
}
}
}
return(0);
}
/* Computes the weighted root-mean-square norm of each vector in X:
   nrm[i] = sqrt( (1/N) * sum_j (X[i][j] * W[i][j])^2 ).
   Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
int i;
sunindextype j, N;
realtype sum;
realtype* wd=NULL;
realtype* xd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(X[0]);
/* initialize norms */
for (i=0; i<nvec; i++) {
nrm[i] = ZERO;
}
/* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
wd = NV_DATA_OMP(W[i]);
/* each thread accumulates a private partial sum over its j chunk */
sum = ZERO;
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
sum += SUNSQR(xd[j] * wd[j]);
}
/* merge the per-thread partial sums into the shared total */
#pragma omp critical
{
nrm[i] += sum;
}
}
}
/* finish after the parallel region: divide by length, take square root */
for (i=0; i<nvec; i++) {
nrm[i] = SUNRsqrt(nrm[i]/N);
}
return(0);
}
/* Computes the masked weighted root-mean-square norm of each vector in X:
   only components where id[j] > 0 contribute to the sum, but the divisor
   is still the full vector length N.
   Returns 0 on success, -1 if nvec < 1. */
int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* nrm)
{
int i;
sunindextype j, N;
realtype sum;
realtype* wd=NULL;
realtype* xd=NULL;
realtype* idd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id);
return(0);
}
/* get vector length and mask data array */
N = NV_LENGTH_OMP(X[0]);
idd = NV_DATA_OMP(id);
/* initialize norms */
for (i=0; i<nvec; i++) {
nrm[i] = ZERO;
}
/* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
wd = NV_DATA_OMP(W[i]);
/* private per-thread partial sum over the masked components */
sum = ZERO;
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
if (idd[j] > ZERO)
sum += SUNSQR(xd[j] * wd[j]);
}
/* merge the per-thread partial sums into the shared total */
#pragma omp critical
{
nrm[i] += sum;
}
}
}
/* finish after the parallel region: divide by length, take square root */
for (i=0; i<nvec; i++) {
nrm[i] = SUNRsqrt(nrm[i]/N);
}
return(0);
}
/* For each vector X[i] (i = 0,...,nvec-1) and each scale a[j]
   (j = 0,...,nsum-1) computes either
     Y[j][i] += a[j] * X[i]            (in place, when Y == Z)  or
     Z[j][i]  = Y[j][i] + a[j] * X[i].
   Returns 0 on success, -1 if nvec < 1 or nsum < 1; otherwise the
   return value of the delegated fused operation.
   NOTE(review): the malloc results below are not checked. */
int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
int i, j;
sunindextype k, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
int retval;
N_Vector* YY;
N_Vector* ZZ;
i = 0; /* initialize to suppress clang warning */
k = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
 * Special cases for nvec == 1
 * --------------------------- */
if (nvec == 1) {
/* should have called N_VLinearSum */
if (nsum == 1) {
N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]);
return(0);
}
/* should have called N_VScaleAddMulti; repack the first column of Y/Z */
YY = (N_Vector*) malloc(nsum * sizeof(N_Vector));
ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector));
for (j=0; j<nsum; j++) {
YY[j] = Y[j][0];
ZZ[j] = Z[j][0];
}
retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ);
free(YY);
free(ZZ);
return(retval);
}
/* --------------------------
 * Special cases for nvec > 1
 * -------------------------- */
/* should have called N_VLinearSumVectorArray */
if (nsum == 1) {
retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]);
return(retval);
}
/* ----------------------------
 * Compute multiple linear sums
 * ---------------------------- */
/* get vector length */
N = NV_LENGTH_OMP(X[0]);
/*
 * Y[j][i] += a[j] * X[i]   (in place; a and the outer Y index follow
 *                           the sum index j, not the vector index i)
 */
if (Y == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
for (j=0; j<nsum; j++) {
yd = NV_DATA_OMP(Y[j][i]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
yd[k] += a[j] * xd[k];
}
}
}
}
return(0);
}
/*
 * Z[j][i] = Y[j][i] + a[j] * X[i]
 */
#pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
for (j=0; j<nsum; j++) {
yd = NV_DATA_OMP(Y[j][i]);
zd = NV_DATA_OMP(Z[j][i]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] = a[j] * xd[k] + yd[k];
}
}
}
}
return(0);
}
/* Computes the linear combination Z[j] = sum_{i=0}^{nsum-1} c[i] * X[i][j]
   for each of the nvec output vectors, with in-place fast paths when
   X[0] aliases Z. Returns 0 on success, -1 if nvec < 1 or nsum < 1. */
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum,
realtype* c,
N_Vector** X,
N_Vector* Z)
{
int i; /* vector arrays index in summation [0,nsum) */
int j; /* vector index in vector array [0,nvec) */
sunindextype k; /* element index in vector [0,N) */
sunindextype N;
realtype* zd=NULL;
realtype* xd=NULL;
realtype* ctmp;
N_Vector* Y;
i = 0; /* initialize to suppress clang warning */
k = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
 * Special cases for nvec == 1
 * --------------------------- */
if (nvec == 1) {
/* should have called N_VScale */
if (nsum == 1) {
N_VScale_OpenMP(c[0], X[0][0], Z[0]);
return(0);
}
/* should have called N_VLinearSum */
if (nsum == 2) {
N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]);
return(0);
}
/* should have called N_VLinearCombination; repack the first column of X */
Y = (N_Vector*) malloc(nsum * sizeof(N_Vector));
for (i=0; i<nsum; i++) {
Y[i] = X[i][0];
}
N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]);
free(Y);
return(0);
}
/* --------------------------
 * Special cases for nvec > 1
 * -------------------------- */
/* should have called N_VScaleVectorArray (replicate c[0] across vectors) */
if (nsum == 1) {
ctmp = (realtype*) malloc(nvec * sizeof(realtype));
for (j=0; j<nvec; j++) {
ctmp[j] = c[0];
}
N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z);
free(ctmp);
return(0);
}
/* should have called N_VLinearSumVectorArray */
if (nsum == 2) {
N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z);
return(0);
}
/* --------------------------
 * Compute linear combination
 * -------------------------- */
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/*
 * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1   (in place, first coef is one)
 */
if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
zd = NV_DATA_OMP(Z[j]);
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
 * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
 * (in place; the implicit barriers of the work-shared loops keep the
 *  initial scaling ordered before the accumulation passes)
 */
if (X[0] == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] *= c[0];
}
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
 * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
 */
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
/* scale first vector in the sum into the output vector */
xd = NV_DATA_OMP(X[0][j]);
zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] = c[0] * xd[k];
}
/* scale and sum remaining vectors into the output vector */
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
* -----------------------------------------------------------------
* OPTIONAL XBraid interface operations
* -----------------------------------------------------------------
*/
/* Reports the number of bytes required to serialize x into a flat buffer.
   Returns 0 on success, -1 when x is NULL. */
int N_VBufSize_OpenMP(N_Vector x, sunindextype *size)
{
  sunindextype nbytes;

  if (x == NULL)
    return(-1);

  nbytes = NV_LENGTH_OMP(x) * ((sunindextype)sizeof(realtype));
  *size  = nbytes;

  return(0);
}
/* Copies the data of x into the user-supplied buffer buf.
   Returns 0 on success, -1 if x or buf is NULL.
   Fix: the previous '#pragma omp for' was orphaned (there is no enclosing
   parallel region in this function), so the copy always ran sequentially;
   use a 'parallel for' matching the other loops in this file. */
int N_VBufPack_OpenMP(N_Vector x, void *buf)
{
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    bd[i] = xd[i];

  return(0);
}
/* Copies data from the user-supplied buffer buf into x.
   Returns 0 on success, -1 if x or buf is NULL.
   Fix: the previous '#pragma omp for' was orphaned (no enclosing parallel
   region), so the copy always ran sequentially; use a 'parallel for'
   matching the other loops in this file. */
int N_VBufUnpack_OpenMP(N_Vector x, void *buf)
{
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    xd[i] = bd[i];

  return(0);
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * VCopy_OpenMP: component-wise copy, z <- x
 */
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  dst = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,src,dst) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    dst[k] = src[k];
}
/* ----------------------------------------------------------------------------
 * VSum_OpenMP: component-wise addition, z <- x + y
 */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = xa[k] + ya[k];
}
/* ----------------------------------------------------------------------------
 * VDiff_OpenMP: component-wise subtraction, z <- x - y
 */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = xa[k] - ya[k];
}
/* ----------------------------------------------------------------------------
 * VNeg_OpenMP: component-wise negation, z <- -x
 */
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  dst = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,src,dst) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    dst[k] = -src[k];
}
/* ----------------------------------------------------------------------------
 * VScaleSum_OpenMP: scaled sum, z <- c * (x + y)
 */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,c,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = c*(xa[k]+ya[k]);
}
/* ----------------------------------------------------------------------------
 * VScaleDiff_OpenMP: scaled difference, z <- c * (x - y)
 */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,c,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = c*(xa[k]-ya[k]);
}
/* ----------------------------------------------------------------------------
 * VLin1_OpenMP: scaled addition, z[i] <- a*x[i] + y[i]
 */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,a,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = (a*xa[k])+ya[k];
}
/* ----------------------------------------------------------------------------
 * VLin2_OpenMP: scaled subtraction, z[i] <- a*x[i] - y[i]
 */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xa, *ya, *za;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);
  ya  = NV_DATA_OMP(y);
  za  = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(k) shared(len,a,xa,ya,za) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    za[k] = (a*xa[k])-ya[k];
}
/* ----------------------------------------------------------------------------
 * Vaxpy_OpenMP: y <- a*x + y, with fast paths for a == 1 and a == -1
 */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, len;
  realtype *src, *acc;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  src = NV_DATA_OMP(x);
  acc = NV_DATA_OMP(y);

  if (a == ONE) {
#pragma omp parallel for default(none) private(k) shared(len,src,acc) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++)
      acc[k] += src[k];
  }
  else if (a == -ONE) {
#pragma omp parallel for default(none) private(k) shared(len,src,acc) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++)
      acc[k] -= src[k];
  }
  else {
#pragma omp parallel for default(none) private(k) shared(len,a,src,acc) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (k = 0; k < len; k++)
      acc[k] += a*src[k];
  }
}
/* ----------------------------------------------------------------------------
 * VScaleBy_OpenMP: in-place scaling, x <- a*x
 */
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
  sunindextype k, len;
  realtype *xa;

  k   = 0; /* initialized to silence clang's analysis */
  len = NV_LENGTH_OMP(x);
  xa  = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(k) shared(len,a,xa) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++)
    xa[k] *= a;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* Vector-array sum: Z[i] = X[i] + Y[i] for i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0.
   Every thread executes the i loop; the inner j loop is work-shared. */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = xd[j] + yd[j];
}
}
return(0);
}
/* Vector-array difference: Z[i] = X[i] - Y[i] for i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* every thread executes the i loop; the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = xd[j] - yd[j];
}
}
return(0);
}
/* Scaled vector-array sum: Z[i] = c * (X[i] + Y[i]) for i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* every thread executes the i loop; the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = c * (xd[j] + yd[j]);
}
}
return(0);
}
/* Scaled vector-array difference: Z[i] = c * (X[i] - Y[i]), i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* every thread executes the i loop; the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = c * (xd[j] - yd[j]);
}
}
return(0);
}
/* Vector-array axpy into Z: Z[i] = a * X[i] + Y[i] for i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* every thread executes the i loop; the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = (a * xd[j]) + yd[j];
}
}
return(0);
}
/* Vector-array scaled subtraction: Z[i] = a * X[i] - Y[i], i = 0,...,nvec-1.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* every thread executes the i loop; the inner j loop is work-shared */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = (a * xd[j]) - yd[j];
}
}
return(0);
}
/* Vector-array axpy in place: Y[i] += a * X[i] for i = 0,...,nvec-1,
   with specialized loops for a == 1 and a == -1 that avoid the multiply.
   All vectors are assumed to share the length of X[0]. Always returns 0. */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
/* Case a == 1: Y[i] += X[i] */
if (a == ONE) {
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] += xd[j];
}
}
return(0);
}
/* Case a == -1: Y[i] -= X[i] */
if (a == -ONE) {
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] -= xd[j];
}
}
return(0);
}
/* General case: Y[i] += a * X[i] */
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] += a * xd[j];
}
}
return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enables (tf nonzero) or disables (tf zero) all fused and vector-array
   operations on v. Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  /* fused vector operations */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMP : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMP : NULL;

  /* vector array operations */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMP : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMP : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMP : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  /* single buffer reduction operations */
  v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_OpenMP : NULL;

  return(0);
}
/* Enables or disables the fused linear-combination operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;
  return(0);
}
/* Enables or disables the fused scale-add-multi operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMP : NULL;
  return(0);
}
/* Enables or disables the fused multi dot-product operation on v
   (both the global and the local single-buffer variant).
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMP : NULL;
  v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_OpenMP : NULL;
  return(0);
}
/* Enables or disables the linear-sum vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the scale vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the const vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the WRMS-norm vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the masked WRMS-norm vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the scale-add-multi vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;
  return(0);
}
/* Enables or disables the linear-combination vector-array operation on v.
   Returns 0 on success, -1 when v or v->ops is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  if (v == NULL || v->ops == NULL)
    return(-1);

  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;
  return(0);
}
|
TransformController.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include <set>
#include "Transformer.h"
#include "SequenceEnumerator.h"
#include "ExceptionCapture.h"
namespace Microsoft { namespace MSR { namespace CNTK {
// Pairs a transformer with the name of the stream it should be applied to.
struct Transformation
{
TransformerPtr m_transformer; // transformation to apply
std::wstring m_streamName; // name of the stream the transformer targets
};
// A class responsible for applying a list of transformers to sequences and stream descriptions.
// Delegates retrieving of sequences to another sequence provider (such as a randomizer) and
// applies transformations after retrieving.
// Usually used by the packer to get the next set of sequences.
class TransformController : public SequenceEnumerator
{
public:
    // Applies each transformation once to the stream descriptions up front,
    // since a transformation can change a stream's layout (e.g. dense to sparse).
    TransformController(const std::vector<Transformation>& transformations, SequenceEnumeratorPtr sequenceProvider)
        : m_sequenceProvider(sequenceProvider)
    {
        std::vector<StreamDescriptionPtr> transformedStreams = m_sequenceProvider->GetStreamDescriptions();
        for (auto& t : transformations)
        {
            size_t streamId = GetStreamId(t.m_streamName, transformedStreams);
            m_transformations.push_back(std::make_pair(t, streamId));
            transformedStreams[streamId] = std::make_shared<StreamDescription>(t.m_transformer->Transform(*transformedStreams[streamId]));
        }
        m_outputStreams = transformedStreams;
    }

    // Returns current position in the global timeline. The returned value is in samples.
    size_t GetCurrentSamplePosition() override
    {
        return m_sequenceProvider->GetCurrentSamplePosition();
    }

    // Sets configuration for the current epoch.
    // Some transformers can change their config based on the epoch.
    virtual void StartEpoch(const EpochConfiguration &config) override
    {
        assert(m_sequenceProvider != nullptr);
        for (auto& t : m_transformations)
        {
            t.first.m_transformer->StartEpoch(config);
        }
        m_sequenceProvider->StartEpoch(config);
    }

    // Forwards the absolute sample position to the underlying provider.
    void SetCurrentSamplePosition(size_t currentSamplePosition) override
    {
        m_sequenceProvider->SetCurrentSamplePosition(currentSamplePosition);
    }

    // Description of streams that the transformer provides.
    virtual std::vector<StreamDescriptionPtr> GetStreamDescriptions() const override
    {
        return m_outputStreams;
    }

    // Gets next sequences up to a maximum count of samples,
    // applying transformers to particular streams (in parallel across sequences).
    virtual Sequences GetNextSequences(size_t globalSampleCount, size_t localSampleCount) override
    {
        assert(m_sequenceProvider != nullptr);
        Sequences sequences = m_sequenceProvider->GetNextSequences(globalSampleCount, localSampleCount);
        if (sequences.m_data.empty())
        {
            return sequences;
        }

        // Fix: hoist the bound into a signed int so the loop does not compare
        // a signed index against size_t (OpenMP 2.0 requires a signed index).
        const int numSequences = static_cast<int>(sequences.m_data.front().size());

        ExceptionCapture capture;
#pragma omp parallel for schedule(dynamic)
        for (int j = 0; j < numSequences; ++j)
        {
            capture.SafeRun([this, &sequences](int sequenceId)
            {
                for (auto& t : m_transformations)
                {
                    sequences.m_data[t.second][sequenceId] = t.first.m_transformer->Transform(sequences.m_data[t.second][sequenceId]);
                }
            }, j);
        }
        capture.RethrowIfHappened();
        return sequences;
    }

    void SetConfiguration(const ReaderConfiguration& config) override
    {
        m_sequenceProvider->SetConfiguration(config);
    }

private:
    // Resolves a stream name to its id; fails hard when the name is unknown.
    // Fix: take the name by const reference to avoid copying the wstring per call.
    size_t GetStreamId(const std::wstring& streamName, const std::vector<StreamDescriptionPtr>& streams) const
    {
        for (const auto& s : streams)
        {
            if (s->m_name == streamName)
            {
                return s->m_id;
            }
        }
        assert(false);
        LogicError("Unexpected stream specified for transformation.");
    }

    SequenceEnumeratorPtr m_sequenceProvider;
    std::vector<StreamDescriptionPtr> m_outputStreams;
    std::vector<std::pair<Transformation, size_t>> m_transformations;
};
}}}
|
ift.c | /*****************************************************************************\
* ift.c
*
* AUTHOR : Felipe Belem (Org.) and Alexandre Falcao et. al.
* DATE : 2021-02-05
* LICENSE : MIT License
* COPYRIGHT : Alexandre Xavier Falcao 2012-2021
* EMAIL : felipe.belem@ic.unicamp.br
* afalcao@ic.unicamp.br
\*****************************************************************************/
#include "ift.h"
// ---------- iftBasicDataTypes.c start
/* Copies the (x, y, z) coordinates of voxel *src into *dst. */
void iftCopyVoxel(iftVoxel *src, iftVoxel *dst)
{
  dst->x = src->x;
  dst->y = src->y;
  dst->z = src->z;
}
// ---------- iftBasicDataTypes.c end
// ---------- iftIntArray.c start
/* Allocates an iftIntArray with room for n zero-initialized integers.
   The caller owns the returned array (release with iftDestroyIntArray). */
iftIntArray *iftCreateIntArray(long n)
{
  iftIntArray *arr = (iftIntArray*) iftAlloc(1, sizeof(iftIntArray));

  arr->n   = n;
  arr->val = iftAllocIntArray(n);

  return arr;
}
/* Releases the array referenced by *iarr (values and struct) and sets the
   caller's handle to NULL. A NULL handle or NULL *iarr is a no-op. */
void iftDestroyIntArray(iftIntArray **iarr)
{
  if (iarr == NULL || *iarr == NULL)
    return;

  if ((*iarr)->val != NULL)
    iftFree((*iarr)->val);
  iftFree(*iarr);
  *iarr = NULL;
}
/* Fisher-Yates shuffle of the first n entries of array, driven by rand().
   Position 0 never needs its own draw, so the walk stops at index 1. */
void iftShuffleIntArray(int* array, int n)
{
  int i, pick, tmp;

  for (i = n - 1; i > 0; i--)
  {
    pick = rand() % (i + 1); /* uniform index in [0, i] */

    tmp         = array[i];
    array[i]    = array[pick];
    array[pick] = tmp;
  }
}
// ---------- iftIntArray.c end
// ---------- iftFloatArray.c start
/* Allocates an iftFloatArray record together with a value buffer of n floats. */
iftFloatArray *iftCreateFloatArray(long n)
{
  iftFloatArray *arr = (iftFloatArray *) iftAlloc(1, sizeof(iftFloatArray));
  arr->val = iftAllocFloatArray(n);
  arr->n   = n;
  return arr;
}
/* Releases an iftFloatArray and clears the caller's pointer. Safe on NULL. */
void iftDestroyFloatArray(iftFloatArray **darr)
{
  if (darr == NULL || *darr == NULL)
    return;
  iftFloatArray *arr = *darr;
  if (arr->val != NULL)
    iftFree(arr->val);
  iftFree(arr);
  *darr = NULL;
}
// ---------- iftFloatArray.c end
// ---------- iftCommon.c start
/* Draws a pseudo-random float in [low, high] by scaling a uniform
 * sample from [0, 1] (rand()/RAND_MAX) onto the requested interval. */
float iftRandomUniform(float low, float high) {
  double unit = ((double) rand()) / ((double) RAND_MAX);
  return low + unit * (high - low);
}
/* Draws a pseudo-random integer in [low, high]. A unit sample is taken
 * with RAND_MAX + 0.5 in the denominator (so it is strictly below 1),
 * scaled by the interval width, and clamped to high for safety. */
int iftRandomInteger (int low, int high){
  double unit = (double) rand () / ((double) RAND_MAX + 0.5);
  int drawn = (int)(unit * (high - low + 1.0)) + low;
  return iftMin(drawn, high);
}
/* Selects 'nelems' distinct pseudo-random integers from [low, high].
 * Returns a newly allocated array of size nelems (caller frees).
 * Errors out if low > high or nelems exceeds the size of the range. */
int *iftRandomIntegers(int low, int high, int nelems) {
  char msg[512];
  if (low > high) {
    sprintf(msg, "Low is greater than High: (%d, %d)", low, high);
    iftError(msg, "iftRandomIntegers");
  }
  int total_of_elems = (high - low + 1);
  if (nelems > total_of_elems) {
    sprintf(msg, "Nelems = %d is greater than the total of integer number in the range: [%d, %d]",
            nelems, low, high);
    iftError(msg, "iftRandomIntegers");
  }
  int *selected = iftAllocIntArray(nelems);
  int *values = iftAllocIntArray(total_of_elems);
  int *count = iftAllocIntArray(total_of_elems);
  int t = 0;
  /* values[] holds every candidate; count[] starts at 100 "lives" each,
   * so a value is only committed after being drawn repeatedly (see loop). */
  for (int i = low; i <= high; i++) {
    values[t] = i;
    count[t] = 100;
    t++;
  }
  /* Whole range requested: return all candidates directly. */
  if (nelems == total_of_elems) {
    iftFree(count);
    iftFree(selected);
    return values;
  }
  // Randomly select samples
  /* Rejection-style draw: each hit decrements a value's counter; when it
   * reaches zero the value is selected and swapped past 'roof' so it can
   * no longer be drawn. */
  t = 0;
  int roof = total_of_elems - 1;
  while (t < nelems) {
    int i = iftRandomInteger(0, roof);
    int v = values[i];
    if (count[i] == 0) {
      selected[t] = v;
      iftSwap(values[i], values[roof]);
      iftSwap(count[i], count[roof]);
      t++;
      roof--;
    } else {
      count[i]--;
    }
  }
  iftFree(values);
  iftFree(count);
  return selected;
}
/* Skips '#'-prefixed comment lines, leaving fp positioned at the first
 * byte of the first non-comment line.
 * Fixes two defects in the original:
 *  - the inner loop `while (fgetc(fp) != '\n');` spun forever when the
 *    stream ended inside a comment with no trailing newline (fgetc keeps
 *    returning EOF, which is never '\n');
 *  - the back-seek was performed even after reading EOF, which un-read
 *    the last real byte of the file. */
void iftSkipComments(FILE *fp)
{
  int c;
  while ((c = fgetc(fp)) == '#') {
    int d;
    do {
      d = fgetc(fp);
    } while (d != '\n' && d != EOF);
    if (d == EOF)
      return;                 /* comment ran to end-of-file: nothing left */
  }
  if (c != EOF)
    fseek(fp, -1, SEEK_CUR);  /* push back the first non-comment byte */
}
/* Logarithm of val in an arbitrary base, via change of base:
 * log_b(v) = ln(v) / ln(b). */
double iftLog(double val, double base)
{
  double num = log(val);
  double den = log(base);
  return num / den;
}
/* Maps a maximum intensity to the normalization value of the smallest
 * supported bit depth (1, 8, 12, 16 or 32 bits) that can hold it. */
long iftNormalizationValue(long maxval)
{
  if (maxval < 0)
    iftError("Input value %ld < 0", "iftNormalizationValue", maxval);
  const long levels[] = {1L, 255L, 4095L, 65535L, 4294967295L};
  for (int i = 0; i < 5; i++)
    if (maxval <= levels[i])
      return levels[i];
  iftError("Invalid maxval number %ld with number of bits > 32. It only supports values within [0, 2ˆn_bits -1], " \
           "where n_bits in {1, 8, 12, 16, 32}", "iftNormalizationValue", maxval);
  return 1;
}
/* Seeds the C library PRNG used by the iftRandom* helpers. */
void iftRandomSeed(unsigned int seed)
{
  srand(seed);
}
/* Starts a wall-clock measurement: allocates a timer and stamps it
 * with the current time of day. Pair with iftToc()/iftCompTime(). */
timer *iftTic()
{
  timer *stamp = (timer *) iftAlloc(1, sizeof(timer));
  gettimeofday(stamp, NULL);
  return stamp;
}
/* Ends a wall-clock measurement: allocates a timer and stamps it
 * with the current time of day. Pair with iftTic()/iftCompTime(). */
timer *iftToc()
{
  timer *stamp = (timer *) iftAlloc(1, sizeof(timer));
  gettimeofday(stamp, NULL);
  return stamp;
}
/* Elapsed time between tic and toc in milliseconds. Frees both stamps
 * on success; returns 0 (without freeing) when either pointer is NULL. */
float iftCompTime(timer *tic, timer *toc)
{
  float elapsed = 0.0;
  if (tic == NULL || toc == NULL)
    return elapsed;
  elapsed = (toc->tv_sec - tic->tv_sec) * 1000.0 +
            (toc->tv_usec - tic->tv_usec) * 0.001;
  iftFree(tic);
  iftFree(toc);
  return elapsed;
}
// ---------- iftCommon.c end
// ---------- iftAdjacency.c start
/* Allocates an adjacency relation with room for n displacement triples. */
iftAdjRel *iftCreateAdjRel(int n)
{
  iftAdjRel *rel = (iftAdjRel *) iftAlloc(1, sizeof(iftAdjRel));
  rel->n  = n;
  rel->dx = (int *) iftAllocIntArray(n);
  rel->dy = (int *) iftAllocIntArray(n);
  rel->dz = (int *) iftAllocIntArray(n);
  return rel;
}
/* Releases an adjacency relation and clears the caller's pointer. */
void iftDestroyAdjRel(iftAdjRel **A)
{
  iftAdjRel *rel = *A;
  if (rel == NULL)
    return;
  if (rel->dx != NULL) iftFree(rel->dx);
  if (rel->dy != NULL) iftFree(rel->dy);
  if (rel->dz != NULL) iftFree(rel->dz);
  iftFree(rel);
  *A = NULL;
}
/* Builds a 3D spherical adjacency relation of radius r: all displacements
 * (dx,dy,dz) with squared length <= round(r*r). The central voxel (0,0,0)
 * is placed first and the remaining neighbors are sorted by increasing
 * distance, so the 6-connected neighbors come right after the center. */
iftAdjRel *iftSpheric(float r)
{
  int r0 = (int) r;
  float r2 = (int) (r*r + 0.5);  /* squared radius, rounded to int */
  /* first pass: count displacements inside the sphere */
  int n = 0;
  for (int dz = -r0; dz <= r0; dz++)
    for (int dy = -r0; dy <= r0; dy++)
      for (int dx =- r0; dx <= r0; dx++)
        if ( ((dx*dx) + (dy*dy) + (dz*dz)) <= r2)
          n++;
  iftAdjRel *A = iftCreateAdjRel(n);
  /* second pass: fill the displacements, remembering where (0,0,0) landed */
  int i = 0;
  int i0 = 0;
  for (int dz = -r0; dz <= r0; dz++)
    for (int dy = -r0; dy <= r0; dy++)
      for (int dx = -r0; dx <= r0; dx++)
        if ( ((dx*dx) + (dy*dy) + (dz*dz)) <= r2) {
          A->dx[i] = dx;
          A->dy[i] = dy;
          A->dz[i] = dz;
          if ((dx == 0) && (dy == 0) && (dz == 0))
            i0 = i;
          i++;
        }
  // shift to right and place central voxel at first
  for (int i = i0; i > 0; i--) {
    int dx = A->dx[i];
    int dy = A->dy[i];
    int dz = A->dz[i];
    A->dx[i] = A->dx[i-1];
    A->dy[i] = A->dy[i-1];
    A->dz[i] = A->dz[i-1];
    A->dx[i-1] = dx;
    A->dy[i-1] = dy;
    A->dz[i-1] = dz;
  }
  // sort by radius, so the 6 closest neighbors will come first
  float *dr = iftAllocFloatArray(A->n);
  for (int i = 0; i < A->n; i++)
    dr[i] = A->dx[i]*A->dx[i] + A->dy[i]*A->dy[i] + A->dz[i]*A->dz[i];
  iftIntArray *idxs = iftIntRange(0, A->n-1, 1);
  iftFQuickSort(dr, idxs->val, 0, A->n-1, IFT_INCREASING);
  iftAdjRel *Asort = iftCreateAdjRel(A->n);
  for (int i = 0; i < A->n; i++) {
    int idx = idxs->val[i];
    Asort->dx[i] = A->dx[idx];
    Asort->dy[i] = A->dy[idx];
    Asort->dz[i] = A->dz[idx];
  }
  iftFree(dr);
  iftDestroyIntArray(&idxs);
  iftDestroyAdjRel(&A);
  return Asort;
}
/* Builds a 2D circular adjacency relation of radius r (dz is always 0):
 * all displacements (dx,dy) with squared length <= round(r*r). The central
 * pixel (0,0) is placed first and the remaining neighbors are sorted by
 * increasing distance, so the 4-connected neighbors come right after it. */
iftAdjRel *iftCircular(float r)
{
  int r0 = (int) r;
  float r2 = (int) (r*r + 0.5);  /* squared radius, rounded to int */
  /* first pass: count displacements inside the circle */
  int n = 0;
  for (int dy = -r0; dy <= r0; dy++)
    for (int dx = -r0; dx <= r0; dx++)
      if (((dx*dx) + (dy*dy)) <= r2)
        n++;
  iftAdjRel *A = iftCreateAdjRel(n);
  /* second pass: fill the displacements, remembering where (0,0) landed */
  int i = 0;
  int i0 = 0;
  for (int dy = -r0; dy <= r0; dy++)
    for (int dx = -r0; dx <= r0; dx++)
      if (((dx*dx) + (dy*dy)) <= r2) {
        A->dx[i] = dx;
        A->dy[i] = dy;
        A->dz[i] = 0;
        if ((dx==0) && (dy==0))
          i0 = i;
        i++;
      }
  // shift to right and place central pixel at first
  for (int i = i0; i > 0; i--) {
    int dx = A->dx[i];
    int dy = A->dy[i];
    A->dx[i] = A->dx[i-1];
    A->dy[i] = A->dy[i-1];
    A->dx[i-1] = dx;
    A->dy[i-1] = dy;
  }
  // sort by radius, so the 4 closest neighbors will come first
  float *dr = iftAllocFloatArray(A->n);
  for (int i = 0; i < A->n; i++)
    dr[i] = A->dx[i]*A->dx[i] + A->dy[i]*A->dy[i];
  iftIntArray *idxs = iftIntRange(0, A->n-1, 1);
  iftFQuickSort(dr, idxs->val, 0, A->n-1, IFT_INCREASING);
  iftAdjRel *Asort = iftCreateAdjRel(A->n);
  for (int i = 0; i < A->n; i++) {
    int idx = idxs->val[i];
    Asort->dx[i] = A->dx[idx];
    Asort->dy[i] = A->dy[idx];
    Asort->dz[i] = A->dz[idx];
  }
  iftFree(dr);
  iftDestroyIntArray(&idxs);
  iftDestroyAdjRel(&A);
  return Asort;
}
/* Deep-copies an adjacency relation. */
iftAdjRel *iftCopyAdjacency(const iftAdjRel *A)
{
  iftAdjRel *copy = iftCreateAdjRel(A->n);
  for (int i = 0; i < A->n; i++) {
    copy->dx[i] = A->dx[i];
    copy->dy[i] = A->dy[i];
    copy->dz[i] = A->dz[i];
  }
  return copy;
}
/* Returns voxel u displaced by the adj-th offset of relation A. */
inline iftVoxel iftGetAdjacentVoxel(const iftAdjRel *A, iftVoxel u, int adj)
{
  iftVoxel neighbor = u;
  neighbor.x += A->dx[adj];
  neighbor.y += A->dy[adj];
  neighbor.z += A->dz[adj];
  return neighbor;
}
// ---------- iftAdjacency.c end
// ---------- iftColor.c start
/* Packs three channel values into an iftColor in R, G, B order. */
iftColor iftRGBColor(int R, int G, int B)
{
  iftColor c;
  c.val[0] = R;
  c.val[1] = G;
  c.val[2] = B;
  return c;
}
/* Converts an RGB color to limited-range YCbCr using BT.2020 luma
 * coefficients (0.2627/0.6780/0.0593). rgbBitDepth is the bit depth of
 * the input channels; yCbCrBitDepth selects the quantization constants
 * (only 8, 10, 12 and 16 are supported). */
iftColor iftRGBtoYCbCrBT2020(iftColor cin, const int rgbBitDepth, const int yCbCrBitDepth)
{
  int minLum, minChr, quantLum, quantChr;
  iftColor cout;
  switch (yCbCrBitDepth) {
    case 8:
      minLum = 16; // 16 * 2^(bitDepth-8)
      minChr = 128; // 128 * 2^(bitDepth-8)
      quantLum = 219.0; // 219 * 2^(bitDepth-8)
      quantChr = 224.0; // 224 * 2^(bitDepth-8)
      break;
    case 10:
      minLum = 64; // 16 * 2^(bitDepth-8)
      minChr = 512; // 128 * 2^(bitDepth-8)
      quantLum = 876; // 219 * 2^(bitDepth-8)
      quantChr = 896; // 224 * 2^(bitDepth-8)
      break;
    case 12:
      minLum = 256; // 16 * 2^(bitDepth-8)
      minChr = 2048; // 128 * 2^(bitDepth-8)
      quantLum = 3504; // 219 * 2^(bitDepth-8)
      quantChr = 3584; // 224 * 2^(bitDepth-8)
      break;
    case 16:
      minLum = 4096; // 16 * 2^(bitDepth-8)
      minChr = 32768; // 128 * 2^(bitDepth-8)
      quantLum = 56064.0; // 219 * 2^(bitDepth-8)
      quantChr = 57344.0; // 224 * 2^(bitDepth-8)
      break;
    default:
      iftError("Bit depth not specified in BT.2020", "iftRGBtoYCbCrBT2020");
      cout.val[0] = cout.val[1] = cout.val[2] = 0;
      return cout;
  }
  /* normalize RGB to [0, 1] */
  double maxRgbValue = (double) ((1 << rgbBitDepth) - 1);
  double r = cin.val[0] / maxRgbValue;
  double g = cin.val[1] / maxRgbValue;
  double b = cin.val[2] / maxRgbValue;
  /* BT.2020 luma and chroma differences */
  double y = 0.2627 * r + 0.6780 * g + 0.0593 * b;
  double cb = (b - y) / 1.8814;
  double cr = (r - y) / 1.4746;
  // clip luminance to [0..1] and chrominance to [-0.5..0.5]
  if (y < 0.0) y = 0.0;
  else if (y > 1.0) y = 1.0;
  if (cb < -0.5) cb = -0.5;
  else if (cb > 0.5) cb = 0.5;
  if (cr < -0.5) cr = -0.5;
  else if (cr > 0.5) cr = 0.5;
  // perform quantization
  cout.val[0] = (int) (y * quantLum) + minLum;
  cout.val[1] = (int) (cb * quantChr) + minChr;
  cout.val[2] = (int) (cr * quantChr) + minChr;
  return cout;
}
/* Converts RGB to YCbCr scaled to [0, normalization_value].
 * Coefficients look like ITU-R BT.601 limited-range constants rescaled
 * from the 8-bit offsets 16 and 128 — TODO confirm against the spec. */
iftColor iftRGBtoYCbCr(iftColor cin, int normalization_value)
{
  iftColor cout;
  float a = (16.0/255.0)*(float)normalization_value;   /* luma offset */
  float b = (128.0/255.0)*(float)normalization_value;  /* chroma offset */
  cout.val[0]=(int)(0.256789062*(float)cin.val[0]+
                    0.504128906*(float)cin.val[1]+
                    0.09790625*(float)cin.val[2]+a);
  cout.val[1]=(int)(-0.148222656*(float)cin.val[0]+
                    -0.290992187*(float)cin.val[1]+
                    0.439214844*(float)cin.val[2]+b);
  cout.val[2]=(int)(0.439214844*(float)cin.val[0]+
                    -0.367789063*(float)cin.val[1]+
                    -0.071425781*(float)cin.val[2]+b);
  /* clamp every channel into [0, normalization_value] */
  for(int i=0; i < 3; i++) {
    if (cout.val[i] < 0) cout.val[i] = 0;
    if (cout.val[i] > normalization_value) cout.val[i] = normalization_value;
  }
  return(cout);
}
/* Inverse of iftRGBtoYCbCr: converts YCbCr back to RGB in
 * [0, normalization_value], clamping each output channel. */
iftColor iftYCbCrtoRGB(iftColor cin, int normalization_value)
{
  iftColor cout;
  float a = (16.0/255.0)*(float)normalization_value;   /* luma offset */
  float b = (128.0/255.0)*(float)normalization_value;  /* chroma offset */
  cout.val[0]=(int)(1.164383562*((float)cin.val[0]-a)+
                    1.596026786*((float)cin.val[2]-b));
  cout.val[1]=(int)(1.164383562*((float)cin.val[0]-a)+
                    -0.39176229*((float)cin.val[1]-b)+
                    -0.812967647*((float)cin.val[2]-b));
  cout.val[2]=(int)(1.164383562*((float)cin.val[0]-a)+
                    2.017232143*((float)cin.val[1]-b));
  /* clamp every channel into [0, normalization_value] */
  for(int i=0; i < 3; i++) {
    if (cout.val[i] < 0) cout.val[i] = 0;
    if (cout.val[i] > normalization_value) cout.val[i] = normalization_value;
  }
  return(cout);
}
/* Inverse of iftRGBtoYCbCrBT2020: converts limited-range BT.2020 YCbCr
 * back to RGB. yCbCrBitDepth selects the dequantization constants (only
 * 8, 10, 12 and 16 supported); rgbBitDepth sets the output scale. */
iftColor iftYCbCrBT2020toRGB(iftColor cin, const int yCbCrBitDepth, const int rgbBitDepth)
{
  int minLum, minChr;
  double quantLum, quantChr;
  iftColor cout;
  switch (yCbCrBitDepth) {
    case 8:
      minLum = 16; // 16 * 2^(bitDepth-8)
      minChr = 128; // 128 * 2^(bitDepth-8)
      quantLum = 219.0; // 219 * 2^(bitDepth-8)
      quantChr = 224.0; // 224 * 2^(bitDepth-8)
      break;
    case 10:
      minLum = 64; // 16 * 2^(bitDepth-8)
      minChr = 512; // 128 * 2^(bitDepth-8)
      quantLum = 876.0; // 219 * 2^(bitDepth-8)
      quantChr = 896.0; // 224 * 2^(bitDepth-8)
      break;
    case 12:
      minLum = 256; // 16 * 2^(bitDepth-8)
      minChr = 2048; // 128 * 2^(bitDepth-8)
      quantLum = 3504.0; // 219 * 2^(bitDepth-8)
      quantChr = 3584.0; // 224 * 2^(bitDepth-8)
      break;
    case 16:
      minLum = 4096; // 16 * 2^(bitDepth-8)
      minChr = 32768; // 128 * 2^(bitDepth-8)
      quantLum = 56064.0; // 219 * 2^(bitDepth-8)
      quantChr = 57344.0; // 224 * 2^(bitDepth-8)
      break;
    default:
      iftError("Bit depth not specified in BT.2020", "iftYCbCrBT2020toRGB");
      cout.val[0] = cout.val[1] = cout.val[2] = 0;
      return cout;
  }
  /* dequantize to y in [0,1], cb/cr in [-0.5, 0.5] */
  double y = (cin.val[0] - minLum) / quantLum;
  double cb = (cin.val[1] - minChr) / quantChr;
  double cr = (cin.val[2] - minChr) / quantChr;
  /* invert the BT.2020 chroma-difference equations */
  double r = cr * 1.4746 + y;
  double b = cb * 1.8814 + y;
  double g = (y - 0.2627 * r - 0.0593 * b) / 0.6780;
  // clip rgb values to [0..1]
  if (r < 0.0) r = 0.0;
  else if (r > 1.0) r = 1.0;
  if (g < 0.0) g = 0.0;
  else if (g > 1.0) g = 1.0;
  if (b < 0.0) b = 0.0;
  else if (b > 1.0) b = 1.0;
  // perform quantization
  double maxRgbValue = (double) ((1 << rgbBitDepth) - 1);
  cout.val[0] = (int) (r * maxRgbValue);
  cout.val[1] = (int) (g * maxRgbValue);
  cout.val[2] = (int) (b * maxRgbValue);
  return cout;
}
/* Converts RGB (in [0, normalization_value]) to CIELAB via linearized
 * sRGB -> XYZ -> Lab. WHITEPOINT_* and LABF are project macros (white
 * reference and the Lab transfer function, defined elsewhere). */
iftFColor iftRGBtoLabNorm(iftColor rgb, int normalization_value)
{
  //RGB to XYZ
  float R = rgb.val[0]/(float)normalization_value;
  float G = rgb.val[1]/(float)normalization_value;
  float B = rgb.val[2]/(float)normalization_value;
  /* undo the sRGB gamma (linear below 0.04045, power curve above) */
  if(R <= 0.04045) R = R/12.92;
  else R = pow((R+0.055)/1.055,2.4);
  if(G <= 0.04045) G = G/12.92;
  else G = pow((G+0.055)/1.055,2.4);
  if(B <= 0.04045) B = B/12.92;
  else B = pow((B+0.055)/1.055,2.4);
  /* linear RGB -> XYZ (sRGB primaries) */
  float X = (0.4123955889674142161*R + 0.3575834307637148171*G + 0.1804926473817015735*B);
  float Y = (0.2125862307855955516*R + 0.7151703037034108499*G + 0.07220049864333622685*B);
  float Z = (0.01929721549174694484*R + 0.1191838645808485318*G + 0.9504971251315797660*B);
  //XYZ to lab
  X /= WHITEPOINT_X;
  Y /= WHITEPOINT_Y;
  Z /= WHITEPOINT_Z;
  X = LABF(X);
  Y = LABF(Y);
  Z = LABF(Z);
  float L = 116*Y - 16;
  float a = 500*(X - Y);
  float b = 200*(Y - Z);
  iftFColor lab;
  lab.val[0] = L;
  lab.val[1] = a;
  lab.val[2] = b;
  return lab;
}
/* Converts RGB to HSV. Output packing: val[0] = hue in degrees
 * (H computed on [0,6] then scaled by 60), val[1] = saturation and
 * val[2] = "value" rescaled to [0, normalization_value]. Note that the
 * value channel here is a luma-like weighted sum, not max(R,G,B). */
iftColor iftRGBtoHSV(iftColor cin, int normalization_value)
{
  float r = ((float)cin.val[0]/normalization_value),
    g = ((float)cin.val[1]/normalization_value),
    b = ((float)cin.val[2]/normalization_value), v, x, f;
  float a[3];
  int i;
  iftColor cout;
  // RGB are each on [0, 1]. S and V are returned on [0, 1] and H is
  // returned on [0, 6].
  x = iftMin(iftMin(r, g), b);   /* min channel */
  v = iftMax(iftMax(r, g), b);   /* max channel */
  if (v == x) {
    /* gray pixel: hue and saturation are zero */
    a[0]=0.0;
    a[1]=0.0;
    a[2]=v;
  } else {
    /* pick the sector from whichever channel is the minimum */
    f = (r == x) ? g - b : ((g == x) ? b - r : r - g);
    i = (r == x) ? 3 : ((g == x) ? 5 : 1);
    a[0]=((float)i)-f/(v-x);
    a[1]=(v-x)/v;
    a[2]=0.299*r+0.587*g+0.114*b;
  }
  // (un)normalize
  cout.val[0] = (int)(a[0]*60.0);
  cout.val[1] = (int)(a[1]*normalization_value);
  cout.val[2] = (int)(a[2]*normalization_value);
  return(cout);
}
/* Converts HSV back to RGB. Input packing mirrors iftRGBtoHSV:
 * val[0] = hue in degrees, val[1] = saturation and val[2] = value,
 * both scaled by normalization_value. */
iftColor iftHSVtoRGB(iftColor cin, int normalization_value)
{
  // H is given on [0, 6]. S and V are given on [0, 1].
  // RGB are each returned on [0, 1].
  float h = ((float)cin.val[0]/60.0),
    s = ((float)cin.val[1]/normalization_value),
    v = ((float)cin.val[2]/normalization_value), m, n, f;
  float a[3]={0,0,0};
  int i;
  iftColor cout;
  if (s==0.0) {
    /* zero saturation: gray pixel */
    a[0]=a[1]=a[2]=v;
  } else {
    i = (int) floor(h);     /* hue sector */
    f = h - (float)i;       /* position within the sector */
    if(!(i & 1)) f = 1 - f; // if i is even
    m = v * (1 - s);
    n = v * (1 - s * f);
    switch (i) {
      case 6:
      case 0: a[0]=v; a[1]=n; a[2]=m; break;
      case 1: a[0]=n; a[1]=v; a[2]=m; break;
      case 2: a[0]=m; a[1]=v; a[2]=n; break;
      case 3: a[0]=m; a[1]=n; a[2]=v; break;
      case 4: a[0]=n; a[1]=m; a[2]=v; break;
      case 5: a[0]=v; a[1]=m; a[2]=n; break;
    }
  }
  // (un)normalize
  for(i=0;i<3;i++)
    cout.val[i]=a[i]*normalization_value;
  return(cout);
}
// ---------- iftColor.c end
// ---------- iftDHeap.c start
/* Creates a binary heap over n external priority values. 'value' is NOT
 * owned by the heap: it is the caller's array, indexed by node id.
 * Nodes start IFT_WHITE (never inserted) with pos/node slots cleared.
 * Default removal policy is MINVALUE (min-heap). */
iftDHeap *iftCreateDHeap(int n, double *value)
{
  iftDHeap *H = NULL;
  int i;
  if (value == NULL) {
    iftError("Cannot create heap without priority value map", "iftCreateDHeap");
  }
  H = (iftDHeap *) iftAlloc(1, sizeof(iftDHeap));
  if (H != NULL) {
    H->n = n;
    H->value = value;
    H->color = (char *) iftAlloc(sizeof(char), n);
    H->node = (int *) iftAlloc(sizeof(int), n);
    H->pos = (int *) iftAlloc(sizeof(int), n);
    H->last = -1;                 /* empty heap */
    H->removal_policy = MINVALUE;
    if (H->color == NULL || H->pos == NULL || H->node == NULL)
      iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateDHeap");
    for (i = 0; i < H->n; i++) {
      H->color[i] = IFT_WHITE;
      H->pos[i] = -1;
      H->node[i] = -1;
    }
  }
  else
    iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateDHeap");
  return H;
}
/* Releases the heap's internal arrays and the heap itself, then clears
 * the caller's pointer. The external 'value' array is not freed. */
void iftDestroyDHeap(iftDHeap **H)
{
  iftDHeap *heap = *H;
  if (heap == NULL)
    return;
  if (heap->node  != NULL) iftFree(heap->node);
  if (heap->color != NULL) iftFree(heap->color);
  if (heap->pos   != NULL) iftFree(heap->pos);
  iftFree(heap);
  *H = NULL;
}
/* Returns 1 when the heap holds n elements, 0 otherwise. */
char iftFullDHeap(iftDHeap *H)
{
  return (H->last == (H->n - 1)) ? 1 : 0;
}
/* Returns 1 when the heap holds no elements, 0 otherwise. */
char iftEmptyDHeap(iftDHeap *H)
{
  return (H->last == -1) ? 1 : 0;
}
/* Inserts 'node' (an index into the external value array) into the heap
 * and sifts it up. Marks the node IFT_GRAY (in the heap). Returns 1 on
 * success, 0 (with a warning) when the heap is full. */
char iftInsertDHeap(iftDHeap *H, int node)
{
  if (iftFullDHeap(H)) {
    iftWarning("DHeap is full","iftInsertDHeap");
    return 0;
  }
  H->last += 1;
  H->node[H->last] = node;
  H->color[node]   = IFT_GRAY;
  H->pos[node]     = H->last;
  iftGoUpDHeap(H, H->last);
  return 1;
}
/* Pops the root (best-priority node per the removal policy), marks it
 * IFT_BLACK, promotes the last node into the root slot and sifts it
 * down. Returns IFT_NIL (with a warning) when the heap is empty. */
int iftRemoveDHeap(iftDHeap *H)
{
  if (iftEmptyDHeap(H)) {
    iftWarning("DHeap is empty","iftRemoveDHeap");
    return IFT_NIL;
  }
  int root = H->node[0];
  H->pos[root]   = -1;
  H->color[root] = IFT_BLACK;
  H->node[0] = H->node[H->last];
  H->pos[H->node[0]] = 0;
  H->node[H->last] = -1;
  H->last--;
  iftGoDownDHeap(H, 0);
  return root;
}
/* Removes an arbitrary element from the heap by temporarily poisoning
 * its priority to the extreme value (so it bubbles to the root), popping
 * it, and then restoring the original priority. The node is left
 * IFT_WHITE so it can be re-inserted later. */
void iftRemoveDHeapElem(iftDHeap *H, int pixel)
{
  if(H->pos[pixel] == -1)
    iftError("Element is not in the Heap", "iftRemoveDHeapElem");
  double aux = H->value[pixel];  /* saved priority, restored below */
  if(H->removal_policy == MINVALUE)
    H->value[pixel] = IFT_INFINITY_DBL_NEG;
  else
    H->value[pixel] = IFT_INFINITY_DBL;
  iftGoUpDHeap(H, H->pos[pixel]);
  iftRemoveDHeap(H);
  H->value[pixel] = aux;
  H->color[pixel] = IFT_WHITE;
}
/* Restores the heap property upward from position i, swapping the node
 * with its parent while the ordering (min- or max-heap, per the removal
 * policy) is violated. Keeps pos[] in sync with node[]. */
void iftGoUpDHeap(iftDHeap *H, int i)
{
  int dad = iftDad(i);
  char min_policy = (H->removal_policy == MINVALUE);
  while (dad >= 0) {
    double parent = H->value[H->node[dad]];
    double child  = H->value[H->node[i]];
    char out_of_order = min_policy ? (parent > child) : (parent < child);
    if (!out_of_order)
      break;
    iftSwap(H->node[dad], H->node[i]);
    H->pos[H->node[i]]   = i;
    H->pos[H->node[dad]] = dad;
    i   = dad;
    dad = iftDad(i);
  }
}
/* Restores the heap property downward from position i: picks the child
 * that beats the current node under the removal policy, swaps, and
 * recurses. Keeps pos[] in sync with node[]. */
void iftGoDownDHeap(iftDHeap *H, int i)
{
  int left  = iftLeftSon(i);
  int right = iftRightSon(i);
  int best  = i;
  if (H->removal_policy == MINVALUE) {
    if (left <= H->last &&
        H->value[H->node[left]] < H->value[H->node[best]])
      best = left;
    if (right <= H->last &&
        H->value[H->node[right]] < H->value[H->node[best]])
      best = right;
  } else { /* removal_policy == MAXVALUE */
    if (left <= H->last &&
        H->value[H->node[left]] > H->value[H->node[best]])
      best = left;
    if (right <= H->last &&
        H->value[H->node[right]] > H->value[H->node[best]])
      best = right;
  }
  if (best != i) {
    iftSwap(H->node[best], H->node[i]);
    H->pos[H->node[i]]    = i;
    H->pos[H->node[best]] = best;
    iftGoDownDHeap(H, best);
  }
}
/* Empties the heap and resets every node to the never-inserted state
 * (IFT_WHITE, no position). The external value array is untouched. */
void iftResetDHeap(iftDHeap *H)
{
  for (int i = 0; i < H->n; i++) {
    H->color[i] = IFT_WHITE;
    H->pos[i]   = -1;
    H->node[i]  = -1;
  }
  H->last = -1;
}
// ---------- iftDHeap.c end
// ---------- iftFile.c start
/* True when pathname exists on disk and is not a directory. */
bool iftFileExists(const char *pathname)
{
  return (iftPathnameExists(pathname) && !iftDirExists(pathname));
}
/* Returns a pointer INTO 'pathname' at the extension (including the '.'),
 * or "" when there is none. Double extensions such as .tar.gz, .scn.gz and
 * .nii.gz are returned whole by backing the pointer up 4 characters to the
 * penultimate dot. */
const char *iftFileExt(const char *pathname)
{
  if (pathname == NULL)
    iftError("Pathname is NULL", "iftFileExt");
  const char *dot = strrchr(pathname, '.'); // returns a pointer to the last occurrence of '.'
  if ( (!dot) || (dot == pathname)) {
    return ("");
  } else {
    if (iftRegexMatch(pathname, "^.*\\.tar\\.(gz|bz|bz2)$") || iftRegexMatch(pathname, "^.*\\.(scn|nii)\\.gz$")) {
      dot -= 4; // points to the penultimate dot '.'
    }
    return dot; // returns the extension with '.'
  }
}
/* Joins n pathname components with the platform separator (IFT_SEP_C),
 * collapsing duplicate separators at each seam. The variadic list is
 * walked twice: once to size the output buffer, once to build it.
 * Returns a newly allocated string (caller frees). */
char *iftJoinPathnames(long n, ...)
{
  if (n <= 0)
    iftError("Number of pathnames to be concatenated is <= 0", "iftJoinPathnames");
  long out_str_size = 1; // '\0'
  // Counts the size of the concatenated string
  va_list path_list;
  va_start(path_list, n);
  for (int i = 0; i < n; i++)
    out_str_size += strlen(va_arg(path_list, char*)) + 1; // one char for '/' (separation char)
  va_end(path_list);
  char *joined_path = iftAllocCharArray(out_str_size);
  char *aux = iftAllocCharArray(out_str_size);
  va_start(path_list, n);
  strcpy(joined_path, va_arg(path_list, char*));
  for (int i = 1; i < n; i++) {
    char *path = va_arg(path_list, char*);
    if (iftStartsWith(path, IFT_SEP_C))
      path++; // skip the first char, which is the directory separator
    if (iftEndsWith(joined_path, IFT_SEP_C))
      sprintf(aux, "%s%s", joined_path, path);
    else
      sprintf(aux, "%s%s%s", joined_path, IFT_SEP_C, path);
    iftFree(joined_path);
    joined_path = iftCopyString(aux);
  }
  iftFree(aux);
  return joined_path;
}
char *iftFilename(const char *pathname, const char *suffix)
{
if (pathname == NULL)
iftError("Pathname is NULL", "iftFilename");
char *base = iftSplitStringAt(pathname, IFT_SEP_C, -1);
if ((suffix != NULL) && (!iftCompareStrings(suffix, ""))) {
char *out_base = iftRemoveSuffix(base, suffix);
iftFree(base);
base = out_base;
}
return base;
}
/* Releases an iftFile record (path and suffix strings included) and
 * clears the caller's pointer. Safe to call with NULL. */
void iftDestroyFile(iftFile **f) {
  if (f == NULL || *f == NULL)
    return;
  iftFile *file = *f;
  if (file->path != NULL) {
    iftFree(file->path);
    file->path = NULL;
  }
  if (file->suffix != NULL) {
    iftFree(file->suffix);
    file->suffix = NULL;
  }
  iftFree(file);
  *f = NULL;
}
/* True when the pathname exists as a regular file AND carries a
 * recognized image extension (see iftIsImagePathnameValid). */
bool iftIsImageFile(const char *img_pathname) {
  if (img_pathname == NULL)
    iftError("Image Pathname is NULL", "iftIsImageFile");
  return (iftFileExists(img_pathname) && iftIsImagePathnameValid(img_pathname));
}
/* True when the pathname (case-insensitively) ends in one of the image
 * extensions this library can read/write. */
bool iftIsImagePathnameValid(const char *img_pathname) {
  if (img_pathname == NULL)
    iftError("Image Pathname is NULL", "iftIsImagePathnameValid");
  char *lower = iftLowerString(img_pathname);
  bool ok = iftRegexMatch(lower, "^.+\\.(jpg|jpeg|pgm|ppm|scn|png|scn\\.gz|zscn|hdr|nii|nii\\.gz)$");
  iftFree(lower);
  return ok;
}
/* Builds an iftFile record from a printf-style pathname.
 * Errors out if the resulting pathname is an existing directory.
 * Fix: use vsnprintf instead of vsprintf — the original could overflow
 * the fixed IFT_STR_DEFAULT_SIZE buffer for long expanded pathnames. */
iftFile *iftCreateFile(const char *format, ...) {
  va_list args;
  char pathname[IFT_STR_DEFAULT_SIZE];
  va_start(args, format);
  vsnprintf(pathname, sizeof(pathname), format, args);  /* bounded write */
  va_end(args);
  // it is a Directory instead of a File
  if (iftDirExists(pathname))
    iftError("Pathname \"%s\" is a directory", "iftCreateFile", pathname);
  iftFile *f = (iftFile*) iftAlloc(1, sizeof(iftFile));
  f->path = iftCopyString(pathname);
  f->suffix = NULL;
  return f;
}
/* Expands "~" in 'path' to the user's home directory ($HOME).
 * Fix: getenv("HOME") can return NULL (e.g. when run from cron or a
 * stripped environment); the original passed that NULL straight into
 * iftReplaceString. Fall back to returning an unmodified copy. */
char *iftExpandUser(const char *path) {
  const char *home = getenv("HOME");
  if (home == NULL)
    return iftCopyString(path);
  return iftReplaceString(path, "~", home);
}
/* Deep-copies an iftFile record, including its path and optional suffix
 * strings. Errors (and returns NULL) when the input is NULL. */
iftFile *iftCopyFile(const iftFile* file) {
  if (file == NULL) {
    iftError("The file to be copied is NULL", "iftCopyFile");
    return NULL;
  }
  iftFile *clone = (iftFile*) iftAlloc(1, sizeof(iftFile));
  clone->path   = iftCopyString(file->path);
  clone->sample = file->sample;
  clone->label  = file->label;
  clone->status = file->status;
  clone->suffix = (file->suffix != NULL) ? iftCopyString(file->suffix) : NULL;
  return clone;
}
// ---------- iftFile.c end
// ---------- iftBMap.c start
/* Creates a bitmap of n bits backed by ceil(n/8) zeroed bytes. */
iftBMap *iftCreateBMap(int n)
{
  iftBMap *bmap = (iftBMap *) iftAlloc(1, sizeof(iftBMap));
  bmap->n      = n;
  bmap->nbytes = (n + 7) / 8;   /* round up to whole bytes */
  bmap->val    = (char *) iftAlloc(bmap->nbytes, sizeof(char));
  if (bmap->val == NULL) {
    iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateBMap");
  }
  return bmap;
}
/* Releases a bitmap and clears the caller's pointer. Safe on NULL. */
void iftDestroyBMap(iftBMap **bmap)
{
  iftBMap *b = *bmap;
  if (b == NULL)
    return;
  iftFree(b->val);
  iftFree(b);
  *bmap = NULL;
}
// ---------- iftBMap.c end
// ---------- iftImage.c start
#if IFT_LIBPNG
#include <png.h>
#endif
#if IFT_LIBJPEG
#include <jpeglib.h>
#endif
/* Reads an image, dispatching on the (lower-cased) file extension.
 * Supports .png, .pgm (P5 binary / P2 ASCII), .ppm, .scn, .jpg/.jpeg.
 * Fix: the .pgm branch used fopen's result without a NULL check; even
 * though iftFileExists passed above, the file can disappear or be
 * unreadable (permissions) between the two calls. */
iftImage *iftReadImageByExt(const char *format, ...)
{
  va_list args;
  char filename[IFT_STR_DEFAULT_SIZE];
  va_start(args, format);
  vsprintf(filename, format, args);
  va_end(args);
  if (!iftFileExists(filename))
    iftError("Image %s does not exist", "iftReadImageByExt", filename);
  iftImage *img = NULL;
  char *ext = iftLowerString(iftFileExt(filename));
  if(iftCompareStrings(ext, ".png")) {
    img = iftReadImagePNG(filename);
  }
  else if (iftCompareStrings(ext, ".pgm")){
    /* peek at the magic number to pick binary (P5) vs ASCII (P2) */
    FILE *fp = fopen(filename,"r");
    if (fp == NULL)
      iftError("Cannot open file: \"%s\"", "iftReadImageByExt", filename);
    char type[10];
    if(fscanf(fp,"%s",type)!=1) iftError("Reading Error", "iftReadImageByExt");
    if (iftCompareStrings(type,"P5")){
      fclose(fp);
      img = iftReadImageP5(filename);
    } else {
      fclose(fp);
      img = iftReadImageP2(filename);
    }
  } else if (iftCompareStrings(ext, ".ppm")){
    img = iftReadImageP6(filename);
  } else if (iftCompareStrings(ext, ".scn")){
    img = iftReadImage(filename);
  } else if (iftCompareStrings(ext, ".jpg") || iftCompareStrings(ext, ".jpeg")){
    img = iftReadImageJPEG(filename);
  } else {
    iftError("Invalid image format: \"%s\" - Try .scn, .ppm, .pgm, .jpg, .png",
             "iftReadImageByExt", ext);
  }
  iftFree(ext);
  return(img);
}
/* Allocates a zero-initialized grayscale image of the given dimensions;
 * the buffer ownership is handed to iftCreateImageFromBuffer. */
iftImage *iftCreateImage(int xsize,int ysize,int zsize)
{
  int *val = iftAllocIntArray(xsize*ysize*zsize);
  return iftCreateImageFromBuffer(xsize, ysize, zsize, val);
}
/* Deep-copies an image (NULL in, NULL out). */
iftImage *iftCopyImage(const iftImage *img)
{
  if (img == NULL)
    return NULL;
  iftImage *clone = iftCreateImage(img->xsize, img->ysize, img->zsize);
  iftCopyImageInplace(img, clone);
  return clone;
}
/* Releases an image and all of its optional channels (Cb, Cr, alpha)
 * and lookup tables (tby, tbz), then clears the caller's pointer. */
void iftDestroyImage(iftImage **img)
{
  if (img == NULL || *img == NULL)
    return;
  iftImage *im = *img;
  if (im->val   != NULL) iftFree(im->val);
  if (im->Cb    != NULL) iftFree(im->Cb);
  if (im->Cr    != NULL) iftFree(im->Cr);
  if (im->alpha != NULL) iftFree(im->alpha);
  if (im->tby   != NULL) iftFree(im->tby);
  if (im->tbz   != NULL) iftFree(im->tbz);
  iftFree(im);
  *img = NULL;
}
/* Writes an image, dispatching on the (lower-cased) file extension.
 * Creates the parent directory when needed. Unknown extensions for a
 * color image fall back to writing a temp .ppm and shelling out to
 * ImageMagick's `convert`.
 * Fixes: bounded snprintf for the shell command (the original sprintf
 * into a 400-byte buffer could overflow for long filenames) and the
 * typo "remore" -> "remove" in the error message. */
void iftWriteImageByExt(const iftImage *img, const char *format, ...)
{
  if (img == NULL)
    iftWarning("Image is NULL... Nothing to write", "iftWriteImageByExt");
  else {
    char command[400];
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];
    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);
    char *parent_dir = iftParentDir(filename);
    if (!iftDirExists(parent_dir))
      iftMakeDir(parent_dir);
    iftFree(parent_dir);
    char *ext = iftLowerString(iftFileExt(filename));
    if(iftCompareStrings(ext, ".png")) {
      iftWriteImagePNG(img,filename);
    } else if (iftCompareStrings(ext, ".scn")) {
      iftWriteImage(img, filename);
    }else if (iftCompareStrings(ext, ".pgm")) {
      /* values above 8 bits need the ASCII variant */
      if (iftMaximumValue(img)>255)
        iftWriteImageP2(img,filename);
      else
        iftWriteImageP5(img,filename);
    } else if (iftCompareStrings(ext, ".ppm")){
      iftWriteImageP6(img,filename);
    } else if (iftIsColorImage(img)){
      iftWriteImageP6(img,"temp.ppm");
      snprintf(command, sizeof(command), "convert temp.ppm %s", filename);
      if (system(command)==-1)
        iftError("Program convert failed or is not installed", "iftWriteImageByExt");
      if (system("rm -f temp.ppm")==-1)
        iftError("Cannot remove temp.ppm", "iftWriteImageByExt");
    } else if(iftCompareStrings(ext, ".jpg") || iftCompareStrings(ext, ".jpeg")) {
      iftWriteImageJPEG(img,filename);
    } else {
      printf("Invalid image format: %s. Please select among the accepted ones: .scn, .ppm, .pgm, .png\n",ext);
      exit(-1);
    }
    iftFree(ext);
  }
}
/* Maximum intensity over the whole image, delegated to the region-based
 * helper with a bounding box covering the full domain. */
int iftMaximumValue(const iftImage *img)
{
  if (img == NULL)
    iftError("Image is NULL", "iftMaximumValue");
  iftBoundingBox bb;
  bb.begin.x = bb.begin.y = bb.begin.z = 0;
  bb.end.x = img->xsize-1;
  bb.end.y = img->ysize-1;
  bb.end.z = img->zsize-1;
  return iftMaximumValueInRegion(img, bb);
}
int iftMinimumValue(const iftImage *img)
{
int img_min_val = IFT_INFINITY_INT;
for (int p = 0; p < img->n; p++)
if (img_min_val > img->val[p])
img_min_val = img->val[p];
return img_min_val;
}
/* Converts a linear voxel index p into (x, y, z) coordinates for the
 * x-fastest layout: p = x + y*xsize + z*xsize*ysize. Uses div() so each
 * quotient/remainder pair comes from a single division. */
inline iftVoxel iftGetVoxelCoord(const iftImage *img, int p)
{
  /* old
   * u.x = (((p) % (((img)->xsize)*((img)->ysize))) % (img)->xsize)
   * u.y = (((p) % (((img)->xsize)*((img)->ysize))) / (img)->xsize)
   * u.z = ((p) / (((img)->xsize)*((img)->ysize)))
   */
  iftVoxel u;
  div_t res1 = div(p, img->xsize * img->ysize);  /* slice index + offset */
  div_t res2 = div(res1.rem, img->xsize);        /* row index + column */
  u.x = res2.rem;
  u.y = res2.quot;
  u.z = res1.quot;
  return u;
}
/* Creates a mask image of the given dimensions with every voxel set to 1. */
iftImage *iftSelectImageDomain(int xsize, int ysize, int zsize)
{
  iftImage *mask=iftCreateImage(xsize,ysize,zsize);
  iftSetImage(mask,1);
  return(mask);
}
/* Computes the minimal bounding box of all non-zero voxels and,
 * optionally (gc_out != NULL), their geometric center. When the image
 * is entirely zero, every coordinate is set to -1.
 * NOTE(review): gc is an iftVoxel initialized with float literals and
 * divided with '/=' — if iftVoxel fields are integers the center is
 * truncated, not rounded; confirm against iftVoxel's declaration. */
iftBoundingBox iftMinBoundingBox(const iftImage *img, iftVoxel *gc_out)
{
  if (img == NULL)
    iftError("Image is NULL", "iftMinBoundingBox");
  long n = 0; // number of spels non-background (non-zero)
  iftVoxel gc = {0.0, 0.0, 0.0};
  iftBoundingBox mbb;
  /* start inverted so any voxel shrinks/grows the box */
  mbb.begin.x = mbb.begin.y = mbb.begin.z = IFT_INFINITY_INT;
  mbb.end.x = mbb.end.y = mbb.end.z = IFT_INFINITY_INT_NEG;
  for (long p = 0; p < img->n; p++) {
    if (img->val[p] != 0) {
      iftVoxel v = iftGetVoxelCoord(img, p);
      mbb.begin.x = iftMin(mbb.begin.x, v.x);
      mbb.begin.y = iftMin(mbb.begin.y, v.y);
      mbb.begin.z = iftMin(mbb.begin.z, v.z);
      mbb.end.x = iftMax(mbb.end.x, v.x);
      mbb.end.y = iftMax(mbb.end.y, v.y);
      mbb.end.z = iftMax(mbb.end.z, v.z);
      gc.x += v.x;
      gc.y += v.y;
      gc.z += v.z;
      n++;
    }
  }
  if (mbb.begin.x == IFT_INFINITY_INT) {
    /* empty image: signal with -1 everywhere */
    mbb.begin.x = mbb.begin.y = mbb.begin.z = -1;
    mbb.end.x = mbb.end.y = mbb.end.z = -1;
    gc.x = gc.y = gc.z = -1.0;
  } else {
    gc.x /= n;
    gc.y /= n;
    gc.z /= n;
  }
  if (gc_out != NULL)
    *gc_out = gc;
  return mbb;
}
/* Reads an image in the library's own SCN format from a printf-style
 * pathname. Layout: "SCN\n", xsize ysize zsize, dx dy dz (voxel size),
 * bits per voxel (8/16/32), a newline, then the raw little-endian voxel
 * buffer. All sample widths are widened into the int val[] array. */
iftImage *iftReadImage(const char *format, ...)
{
  iftImage *img = NULL;
  FILE *fp = NULL;
  uchar *data8 = NULL;
  ushort *data16 = NULL;
  int *data32 = NULL;
  char type[10];
  int p, v, xsize, ysize, zsize;
  va_list args;
  char filename[IFT_STR_DEFAULT_SIZE];
  va_start(args, format);
  vsprintf(filename, format, args);
  va_end(args);
  fp = fopen(filename, "rb");
  if (fp == NULL) {
    iftError("Cannot open file: \"%s\"", "iftReadImage", filename);
  }
  if (fscanf(fp, "%s\n", type) != 1)
    iftError("Reading error: Image type", "iftReadImage");
  if (iftCompareStrings(type, "SCN")) {
    //iftSkipComments(fp);
    if (fscanf(fp, "%d %d %d\n", &xsize, &ysize, &zsize) != 3)
      iftError("Reading error: Image resolution/size", "iftReadImage");
    img = iftCreateImage(xsize, ysize, zsize);
    if (fscanf(fp, "%f %f %f\n", &img->dx, &img->dy, &img->dz) != 3) {
      iftError("Reading error: Pixel/Voxel size", "iftReadImage");
    }
    if (fscanf(fp, "%d", &v) != 1)
      iftError("Reading error", "iftReadImage");
    /* consume the rest of the header line before the binary payload */
    while (fgetc(fp) != '\n');
    if (v == 8) {
      data8 = iftAllocUCharArray(img->n);
      if (fread(data8, sizeof(uchar), img->n, fp) != (uint)img->n)
        iftError("Reading error", "iftReadImage");
      for (p = 0; p < img->n; p++)
        img->val[p] = (int) data8[p];
      iftFree(data8);
    } else if (v == 16) {
      data16 = iftAllocUShortArray(img->n);
      if (fread(data16, sizeof(ushort), img->n, fp) != (uint)img->n)
        iftError("Reading error 16 bits", "iftReadImage");
      for (p = 0; p < img->n; p++)
        img->val[p] = (int) data16[p];
      iftFree(data16);
    } else if (v == 32) {
      data32 = iftAllocIntArray(img->n);
      if (fread(data32, sizeof(int), img->n, fp) != (uint)img->n)
        iftError("Reading error", "iftReadImage");
      for (p = 0; p < img->n; p++)
        img->val[p] = data32[p];
      iftFree(data32);
    } else {
      iftError("Input scene must be 8, 16, or 32 bit", "iftReadImage");
    }
  } else {
    iftError("Invalid file type", "iftReadImage");
  }
  fclose(fp);
  return (img);
}
#if IFT_LIBPNG
/* libpng helper: validates the PNG signature, sets up the read structs
 * (returned through png_ptr/info_ptr for the caller to query), reads the
 * whole image and returns the row-pointer array. The caller owns the
 * rows and the png structs.
 * NOTE(review): png_get_bit_depth is queried here before png_read_info
 * has populated info_ptr, so 'depth' may be 0 at that point and the
 * expand_gray_1_2_4_to_8 branch may never trigger — confirm against the
 * libpng call ordering requirements before relying on sub-8-bit input. */
png_bytep* iftReadPngImageAux(const char *file_name, png_structp *png_ptr, png_infop *info_ptr)
{
  png_byte header[8];    // 8 is the maximum size that can be checked
  /* open file and test for it being a png */
  FILE *fp = fopen(file_name, "rb");
  if (!fp)
    iftError("File %s could not be opened for reading", "iftReadPngImageAux", file_name);
  if (fread(header, 1, 8, fp)!=8) iftError("Reading error", "iftReadPngImageAux");
  if (png_sig_cmp(header, 0, 8))
    iftError("File %s is not recognized as a PNG file", "iftReadPngImageAux", file_name);
  int height;
  png_bytep * row_pointers;
  /* initialize stuff */
  png_structp ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
  if (!ptr)
    iftError("Internal error: png_create_read_struct failed", "iftReadImagePNG");
  *png_ptr = ptr;
  *info_ptr = png_create_info_struct(*png_ptr);
  int depth = png_get_bit_depth((*png_ptr), (*info_ptr));
  if(depth < 8){
    png_set_expand_gray_1_2_4_to_8(ptr);
  }
  if (!(*info_ptr))
    iftError("Internal error: png_create_info_struct failed", "iftReadImagePNG");
  if (setjmp(png_jmpbuf(*png_ptr)))
    iftError("Internal error: Error during init_io", "iftReadImagePNG");
  png_init_io(*png_ptr, fp);
  png_set_sig_bytes(*png_ptr, 8);  /* the 8 signature bytes were already consumed */
  png_read_info(*png_ptr, *info_ptr);
  // reduces the pixels back down to the original bit depth
  //png_color_8p sig_bit = NULL;
  //if (png_get_sBIT(*png_ptr, *info_ptr, &sig_bit)) {
  //  png_set_shift(*png_ptr, sig_bit);
  //}
  height = png_get_image_height(*png_ptr, *info_ptr);
  png_read_update_info(*png_ptr, *info_ptr);
  /* read file */
  if (setjmp(png_jmpbuf(*png_ptr)))
    iftError("Internal error: Error during read_image", "iftReadImagePNG");
  row_pointers = (png_bytep*) iftAlloc(height, sizeof(png_bytep));
  for (int y=0; y<height; y++)
    row_pointers[y] = (png_byte*) iftAlloc(png_get_rowbytes(*png_ptr, *info_ptr), 1);
  png_read_image(*png_ptr, row_pointers);
  fclose(fp);
  return row_pointers;
}
#endif
/* Reads a 2D PNG file into an iftImage.
 *
 * The path is built printf-style from (format, ...). Decoding is delegated to
 * iftReadPngImageAux(), which returns one malloc'd row buffer per scanline;
 * this function unpacks those rows according to the PNG color type:
 *   GRAY        -> img->val only
 *   GRAY_ALPHA  -> img->val + img->alpha
 *   RGB         -> RGB converted to YCbCr (val/Cb/Cr)
 *   RGB_ALPHA   -> as RGB, plus img->alpha
 * 16-bit samples are stored big-endian in the PNG stream, so the second byte
 * is appended with (hi<<8)+lo.
 *
 * Returns a newly allocated iftImage (2D: zsize == 1, dz == 0).
 * Fails via iftError() if libpng support is disabled or decoding fails. */
iftImage* iftReadImagePNG(const char* format, ...)
{
#if IFT_LIBPNG
va_list args;
char filename[IFT_STR_DEFAULT_SIZE];
va_start(args, format);
/* NOTE(review): vsprintf is unbounded; an overlong expanded path would
 * overflow filename[IFT_STR_DEFAULT_SIZE] — vsnprintf would be safer. */
vsprintf(filename, format, args);
va_end(args);
png_infop info_ptr;
png_structp png_ptr;
png_bytep *row_pointers;
row_pointers = iftReadPngImageAux(filename, &png_ptr, &info_ptr);
int width, height, color_type, depth;
width = png_get_image_width(png_ptr, info_ptr);
height = png_get_image_height(png_ptr, info_ptr);
color_type = png_get_color_type(png_ptr, info_ptr);
depth = png_get_bit_depth(png_ptr, info_ptr);
iftImage* img = iftCreateImage(width, height, 1);
unsigned int numberChannels = png_get_channels(png_ptr, info_ptr);
/* bytes per sample: 1 for 8-bit PNGs, 2 for 16-bit PNGs */
int byteshift = depth/8;
int x, y;
int p = 0;
if(color_type==PNG_COLOR_TYPE_GRAY)//gray image
{
for (y=0; y<height; y++) {
png_byte* row = row_pointers[y];
for (x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberChannels*byteshift]);
img->val[p] = ptr[0];
if(depth==16) {
/* big-endian 16-bit sample: append low-order byte */
img->val[p] = (img->val[p]<<8)+ptr[1];
}
p++;
}
}
}else if(color_type==PNG_COLOR_TYPE_GRAY_ALPHA ){
if(img->alpha == NULL){
iftSetAlpha(img,0);
}
for (y=0; y<height; y++) {
png_byte* row = row_pointers[y];
for (x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberChannels*byteshift]);
if(depth == 8){
img->val[p] = ptr[0];
img->alpha[p] = ptr[1];
}
else if(depth==16) {
img->val[p] = ptr[0];
img->val[p] = (img->val[p]<<8)+ptr[1];
img->alpha[p] = ptr[2];
img->alpha[p] = (img->alpha[p]<<8)+ptr[3];
}
p++;
}
}
}
else if(color_type == PNG_COLOR_TYPE_RGB){//color image
iftSetCbCr(img, 128);
iftColor rgb, ycbcr;
for (y=0; y<height; y++) {
png_byte* row = row_pointers[y];
for (x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberChannels*byteshift]);
rgb.val[0] = ptr[0*byteshift];
rgb.val[1] = ptr[1*byteshift];
rgb.val[2] = ptr[2*byteshift];
if(depth==16) { //read second byte in case of 16bit images
rgb.val[0] = (rgb.val[0]<<8) + ptr[1];
rgb.val[1] = (rgb.val[1]<<8) + ptr[3];
rgb.val[2] = (rgb.val[2]<<8) + ptr[5];
}
/* NOTE(review): the RGB branch uses the BT.2020 conversion while the
 * RGB_ALPHA branch below uses iftRGBtoYCbCr — confirm this asymmetry
 * is intentional. */
ycbcr = iftRGBtoYCbCrBT2020(rgb, depth, depth);
img->val[p] = ycbcr.val[0];
img->Cb[p] = ycbcr.val[1];
img->Cr[p] = ycbcr.val[2];
p++;
}
}
}else if(color_type == PNG_COLOR_TYPE_RGB_ALPHA){
iftSetCbCr(img, 128);
iftColor rgb, ycbcr;
if(img->alpha == NULL){
iftSetAlpha(img,0);
}
for (y=0; y<height; y++) {
png_byte* row = row_pointers[y];
for (x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberChannels*byteshift]);
rgb.val[0] = ptr[0*byteshift];
rgb.val[1] = ptr[1*byteshift];
rgb.val[2] = ptr[2*byteshift];
ushort alpha = ptr[3*byteshift];
if(depth==16) { //read second byte in case of 16bit images
rgb.val[0] = (rgb.val[0]<<8) + ptr[1];
rgb.val[1] = (rgb.val[1]<<8) + ptr[3];
rgb.val[2] = (rgb.val[2]<<8) + ptr[5];
alpha = (alpha<<8) + ptr[7];
}
ycbcr = iftRGBtoYCbCr(rgb, depth==8?255:65535);
img->val[p] = ycbcr.val[0];
img->Cb[p] = ycbcr.val[1];
img->Cr[p] = ycbcr.val[2];
img->alpha[p] = alpha;
p++;
}
}
}
/* release the row buffers allocated by iftReadPngImageAux */
for (y = 0; y < height; ++y) {
iftFree(row_pointers[y]);
}
iftFree(row_pointers);
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
/* 2D image: no voxel size along z */
img->dz = 0.0;
return img;
#else
iftError("LibPNG support was not enabled!","iftReadImagePNG");
return NULL;
#endif
}
/* Reads a JPEG file into an iftImage (path built printf-style from format/...).
 *
 * Based on libjpeg's example.c. The decompressed scanlines are copied into the
 * iftImage according to the decoder's output color space: grayscale goes to
 * img->val, RGB/CMYK are converted to YCbCr, and YCbCr/YCCK samples are stored
 * directly (the K channel of YCCK is ignored with a warning). Big-gamut color
 * spaces are rejected via iftError.
 *
 * Returns a new 2D iftImage, or NULL if the file cannot be opened.
 * Fails via iftError() when libjpeg support is disabled. */
iftImage* iftReadImageJPEG(const char* format, ...)
{
#if IFT_LIBJPEG
va_list args;
char filename[IFT_STR_DEFAULT_SIZE];
va_start(args, format);
vsprintf(filename, format, args);
va_end(args);
iftImage* image = NULL;
//code based on externals/libjpeg/source/example.c
/* This struct contains the JPEG decompression parameters and pointers to
 * working space (which is allocated as needed by the JPEG library).
 */
struct jpeg_decompress_struct cinfo;
/* We use our private extension JPEG error handler.
 * Note that this struct must live as long as the main JPEG parameter
 * struct, to avoid dangling-pointer problems.
 */
struct jpeg_error_mgr jerr;
/* More stuff */
FILE * infile; /* source file */
JSAMPARRAY buffer; /* Output row buffer */
int row_stride; /* physical row width in output buffer */
/* In this example we want to open the input file before doing anything else,
 * so that the setjmp() error recovery below can assume the file is open.
 * VERY IMPORTANT: use "b" option to fopen() if you are on a machine that
 * requires it in order to read binary files.
 */
if ((infile = fopen(filename, "rb")) == NULL) {
printf("[readImageJPEG] can't open %s\n",filename);
return NULL;
}
/* Step 1: allocate and initialize JPEG decompression object */
/* We set up the normal JPEG error routines, then override error_exit. */
cinfo.err = jpeg_std_error(&jerr);
//jerr.pub.error_exit = my_error_exit;
/* Establish the setjmp return context for my_error_exit to use. */
/* NOTE(review): setjmp_buffer is never registered with the error manager
 * (error_exit is not overridden above), so nothing ever longjmp()s here —
 * this recovery branch is dead code and the default libjpeg handler will
 * exit() on a decode error. Confirm and wire up a custom error_exit. */
jmp_buf setjmp_buffer;
if (setjmp(setjmp_buffer)) {
/* If we get here, the JPEG code has signaled an error.
 * We need to clean up the JPEG object, close the input file, and return.
 */
jpeg_destroy_decompress(&cinfo);
printf("[readImageJPEG] code has signaled an error\n");
fclose(infile);
return NULL;
}
/* Now we can initialize the JPEG decompression object. */
jpeg_create_decompress(&cinfo);
/* Step 2: specify data source (eg, a file) */
jpeg_stdio_src(&cinfo, infile);
/* Step 3: read file parameters with jpeg_read_header() */
(void) jpeg_read_header(&cinfo, TRUE);
/* We can ignore the return value from jpeg_read_header since
 * (a) suspension is not possible with the stdio data source, and
 * (b) we passed TRUE to reject a tables-only JPEG file as an error.
 * See libjpeg.txt for more info.
 */
/* Step 4: set parameters for decompression */
/* In this example, we don't need to change any of the defaults set by
 * jpeg_read_header(), so we do nothing here.
 */
/* Step 5: Start decompressor */
(void) jpeg_start_decompress(&cinfo);
/* We can ignore the return value since suspension is not possible
 * with the stdio data source.
 */
/* We may need to do some setup of our own at this point before reading
 * the data. After jpeg_start_decompress() we have the correct scaled
 * output image dimensions available, as well as the output colormap
 * if we asked for color quantization.
 * In this example, we need to make an output work buffer of the right size.
 */
/* JSAMPLEs per row in output buffer */
row_stride = cinfo.output_width * cinfo.output_components;
/* Make a one-row-high sample array that will go away when done with image */
buffer = (*cinfo.mem->alloc_sarray)
((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
/* Step 6: while (scan lines remain to be read) */
/* jpeg_read_scanlines(...); */
/* Here we use the library's state variable cinfo.output_scanline as the
 * loop counter, so that we don't have to keep track ourselves.
 */
image = iftCreateImage(cinfo.output_width,cinfo.output_height,1);
//0 - JCS_GRAYSCALE
//1 - JCS_RGB
//2 - JCS_YCbCr
//3 - JCS_CMYK
//4 - JCS_YCCK
//5 - JCS_BG_RGB
//6 - JCS_BG_YCC
unsigned int imageRow = 0;
unsigned int imageCol = 0;
iftColor rgb;
iftColor YCbCr;
/* maximum sample value for the file's bit depth, e.g. 255 for 8-bit */
float scalingFactor = pow(2,cinfo.data_precision)-1;
switch (cinfo.out_color_space){
case JCS_GRAYSCALE:
imageRow = 0;
while (cinfo.output_scanline < cinfo.output_height){
jpeg_read_scanlines(&cinfo, buffer, 1);
imageCol = 0;
for (unsigned int i = 0; i < (unsigned int)cinfo.output_width; i++) {
int shift = i*cinfo.num_components;
iftImgVal(image,imageCol,imageRow,0) = buffer[0][shift];
imageCol++;
}
imageRow++;
}
break;
case JCS_RGB:
iftSetCbCr(image,128);
imageRow = 0;
while (cinfo.output_scanline < cinfo.output_height){
jpeg_read_scanlines(&cinfo, buffer, 1);
imageCol = 0;
for (unsigned int i = 0; i < (unsigned int)cinfo.output_width; i++) {
int shift = i*cinfo.num_components;
rgb.val[0] = buffer[0][shift+0];
rgb.val[1] = buffer[0][shift+1];
rgb.val[2] = buffer[0][shift+2];
YCbCr = iftRGBtoYCbCr(rgb,scalingFactor);
iftImgVal(image,imageCol,imageRow,0) = YCbCr.val[0];
iftImgCb(image,imageCol,imageRow,0) = YCbCr.val[1];
iftImgCr(image,imageCol,imageRow,0) = YCbCr.val[2];
imageCol++;
}
imageRow++;
}
break;
case JCS_YCbCr:
iftSetCbCr(image,128);
imageRow = 0;
while (cinfo.output_scanline < cinfo.output_height){
jpeg_read_scanlines(&cinfo, buffer, 1);
imageCol = 0;
for (unsigned int i = 0; i < (unsigned int)cinfo.output_width; i++) {
int shift = i*cinfo.num_components;
iftImgVal(image,imageCol,imageRow,0) = buffer[0][shift+0];
iftImgCb(image,imageCol,imageRow,0) = buffer[0][shift+1];
iftImgCr(image,imageCol,imageRow,0) = buffer[0][shift+2];
imageCol++;
}
imageRow++;
}
break;
case JCS_CMYK:
iftSetCbCr(image,128);
imageRow = 0;
while (cinfo.output_scanline < cinfo.output_height){
jpeg_read_scanlines(&cinfo, buffer, 1);
imageCol = 0;
for (unsigned int i = 0; i < (unsigned int)cinfo.output_width; i++) {
int shift = i*cinfo.num_components;
//convert CMYK to RGB (reference: http://www.rapidtables.com/convert/color/cmyk-to-rgb.htm)
/* NOTE(review): the referenced formula is 255*(1-C)*(1-K) with C,K in
 * [0,1]; here the products of (100-...) terms are not rescaled, so the
 * resulting values greatly exceed the 8-bit range — verify against a
 * real CMYK JPEG. */
rgb.val[0] = 255*(100-buffer[0][shift+0])*(100-buffer[0][shift+3]);
rgb.val[1] = 255*(100-buffer[0][shift+1])*(100-buffer[0][shift+3]);;
rgb.val[2] = 255*(100-buffer[0][shift+2])*(100-buffer[0][shift+3]);;
YCbCr = iftRGBtoYCbCr(rgb,scalingFactor);
iftImgVal(image,imageCol,imageRow,0) = YCbCr.val[0];
iftImgCb(image,imageCol,imageRow,0) = YCbCr.val[1];
iftImgCr(image,imageCol,imageRow,0) = YCbCr.val[2];
imageCol++;
}
imageRow++;
}
break;
case JCS_YCCK:
iftSetCbCr(image,128);
imageRow = 0;
iftWarning("Image is Y/Cb/Cr/K color space. The channel K is ignored", "iftReadImageJPEG");
while (cinfo.output_scanline < cinfo.output_height){
jpeg_read_scanlines(&cinfo, buffer, 1);
imageCol = 0;
for (unsigned int i = 0; i < (unsigned int)cinfo.output_width; i++) {
int shift = i*cinfo.num_components;
iftImgVal(image,imageCol,imageRow,0) = buffer[0][shift+0];
iftImgCb(image,imageCol,imageRow,0) = buffer[0][shift+1];
iftImgCr(image,imageCol,imageRow,0) = buffer[0][shift+2];
imageCol++;
}
imageRow++;
}
break;
case JCS_BG_RGB:
iftError("Big gamut red/green/blue color space not supported", "iftReadImageJPEG");
break;
case JCS_BG_YCC:
iftError("Big gamut red/green/blue color space not supported", "iftReadImageJPEG");
break;
default:
iftError("Unkwon color space", "iftReadImageJPEG");
break;
}
/* Step 7: Finish decompression */
(void) jpeg_finish_decompress(&cinfo);
/* This is an important step since it will release a good deal of memory. */
jpeg_destroy_decompress(&cinfo);
/* After finish_decompress, we can close the input file.
 * Here we postpone it until after no more JPEG errors are possible,
 * so as to simplify the setjmp error logic above. (Actually, I don't
 * think that jpeg_destroy can do an error exit, but why assume anything...)
 */
fclose(infile);
/* At this point you may want to check to see whether any corrupt-data
 * warnings occurred (test whether jerr.pub.num_warnings is nonzero).
 */
//jerr.num_warnings; //useful to know about corrupted data
//printf("%ld\n",jerr.num_warnings);
return image;
#else
iftError("LibJPEG support was not enabled!","iftReadImageJPEG");
return NULL;
#endif
}
/* Reads a binary PGM (P5) image into a new 2D iftImage.
 *
 * The path is built printf-style from (format, ...). Supports 8-bit raw
 * samples and 16-bit samples stored big-endian (high byte first), selected
 * by the header's maximum value.
 *
 * Returns a new iftImage (zsize == 1, dz == 0); fails via iftError() on any
 * I/O or format problem. */
iftImage *iftReadImageP5(const char *format, ...)
{
    iftImage *img = NULL;
    FILE *fp = NULL;
    uchar *data8 = NULL;
    ushort *data16 = NULL;
    char type[10];
    int p, v, xsize, ysize, zsize, hi, lo;
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];

    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    fp = fopen(filename, "rb");
    if (fp == NULL) {
        iftError(MSG_FILE_OPEN_ERROR, "iftReadImageP5", filename);
    }
    /* Bug fix: "%s" was unbounded and could overflow type[10] on a corrupt
     * header; "%9s" limits the read to sizeof(type)-1 characters. */
    if (fscanf(fp, "%9s\n", type) != 1) {
        iftError("Reading error", "iftReadImageP5");
    }
    if (iftCompareStrings(type, "P5")) {
        iftSkipComments(fp);
        if (fscanf(fp, "%d %d\n", &xsize, &ysize) != 2)
            iftError("Reading error", "iftReadImageP5");
        zsize = 1;
        img = iftCreateImage(xsize, ysize, zsize);
        img->dz = 0.0;  /* 2D image */
        if (fscanf(fp, "%d", &v) != 1)
            iftError("Reading error", "iftReadImageP5");
        /* skip the single whitespace byte separating header and raster */
        while (fgetc(fp) != '\n');
        if ((v <= 255) && (v > 0)) {
            /* 8-bit raster: one byte per pixel */
            data8 = iftAllocUCharArray(img->n);
            if (fread(data8, sizeof(uchar), img->n, fp) != (uint)img->n)
                iftError("Reading error", "iftReadImageP5");
            for (p = 0; p < img->n; p++)
                img->val[p] = (int) data8[p];
            iftFree(data8);
        } else if ((v <= 65535) && (v > 255)) {
            /* 16-bit raster: PNM stores samples big-endian (high byte first) */
            data16 = iftAllocUShortArray(img->n);
            for (p = 0; p < img->n; p++) {
                if ((hi = fgetc(fp)) == EOF)
                    iftError("Reading error", "iftReadImageP5");
                if ((lo = fgetc(fp)) == EOF)
                    iftError("Reading error", "iftReadImageP5");
                data16[p] = (hi << 8) + lo;
            }
            for (p = 0; p < img->n; p++)
                img->val[p] = (int) data16[p];
            iftFree(data16);
        } else {
            iftError("Invalid maximum value", "iftReadImageP5");
        }
    } else {
        iftError("Invalid image type", "iftReadImageP5");
    }
    fclose(fp);
    return (img);
}
/* Reads a binary PPM (P6) color image into a new 2D iftImage.
 *
 * The path is built printf-style from (format, ...). 8-bit images are
 * converted with iftRGBtoYCbCr; deeper images (maxval 256..65536) read
 * big-endian 16-bit samples and use the BT.2020 conversion with the Y/Cb/Cr
 * depth rounded up to 10, 12 or 16 bits.
 *
 * Returns a new color iftImage (zsize == 1, dz == 0); fails via iftError()
 * on any I/O or format problem. */
iftImage *iftReadImageP6(const char *format, ...)
{
    iftImage *img = NULL;
    FILE *fp = NULL;
    char type[10];
    int p, v, xsize, ysize, zsize;
    ushort rgb16[3];
    iftColor RGB, YCbCr;
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];

    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    /* Bug fix: P6 carries a binary raster, so the file must be opened in
     * binary mode ("rb" instead of "r"); text mode corrupts the data on
     * platforms that translate line endings (e.g. Windows). */
    fp = fopen(filename, "rb");
    if (fp == NULL) {
        iftError(MSG_FILE_OPEN_ERROR, "iftReadImageP6", filename);
    }
    /* Bug fix: bound the magic-number read to sizeof(type)-1 characters so a
     * corrupt header cannot overflow type[10]. */
    if (fscanf(fp, "%9s\n", type) != 1)
        iftError("Reading error", "iftReadImageP6");
    if (iftCompareStrings(type, "P6")) {
        iftSkipComments(fp);
        if (fscanf(fp, "%d %d\n", &xsize, &ysize) != 2)
            iftError("Reading error", "iftReadImageP6");
        zsize = 1;
        img = iftCreateImage(xsize, ysize, zsize);
        img->Cb = iftAllocUShortArray(img->n);
        img->Cr = iftAllocUShortArray(img->n);
        img->dz = 0.0;  /* 2D image */
        if (fscanf(fp, "%d", &v) != 1)
            iftError("Reading error", "iftReadImageP6");
        /* skip the single whitespace byte separating header and raster */
        while (fgetc(fp) != '\n');
        if (v >= 0 && v < 256) {
            /* 8-bit raster: three bytes (R,G,B) per pixel */
            for (p = 0; p < img->n; p++) {
                RGB.val[0] = fgetc(fp);
                RGB.val[1] = fgetc(fp);
                RGB.val[2] = fgetc(fp);
                YCbCr = iftRGBtoYCbCr(RGB, 255);
                img->val[p] = YCbCr.val[0];
                img->Cb[p]  = (ushort) YCbCr.val[1];
                img->Cr[p]  = (ushort) YCbCr.val[2];
            }
        } else if (v >= 256 && v <= 65536) {
            int rgbBitDepth = ceil(iftLog(v, 2));
            /* Round the Y/Cb/Cr depth up to a standard 10/12/16-bit depth.
             * NOTE(review): with these strict '<' comparisons an exactly
             * 10-bit input is promoted to 12 bits and an exactly 12-bit
             * input to 16 — confirm '<=' was not intended. */
            int ycbcrBitDepth = rgbBitDepth;
            if (ycbcrBitDepth < 10)
                ycbcrBitDepth = 10;
            else if (ycbcrBitDepth < 12)
                ycbcrBitDepth = 12;
            else if (ycbcrBitDepth < 16)
                ycbcrBitDepth = 16;
            for (p = 0; p < img->n; p++) {
                // read 6 bytes for each image pixel
                if (fread(rgb16, 2, 3, fp) == 3) {
                    // the PPM format specifies 2-byte integers as big endian,
                    // so we need to swap the bytes if the architecture is little endian
                    RGB.val[0] = ((rgb16[0] & 0xff) << 8) | ((ushort) rgb16[0] >> 8);
                    RGB.val[1] = ((rgb16[1] & 0xff) << 8) | ((ushort) rgb16[1] >> 8);
                    RGB.val[2] = ((rgb16[2] & 0xff) << 8) | ((ushort) rgb16[2] >> 8);
                    YCbCr = iftRGBtoYCbCrBT2020(RGB, rgbBitDepth, ycbcrBitDepth);
                    img->val[p] = YCbCr.val[0];
                    img->Cb[p]  = (ushort) YCbCr.val[1];
                    img->Cr[p]  = (ushort) YCbCr.val[2];
                }
            }
        } else {
            iftError("Invalid maximum value", "iftReadImageP6");
        }
    } else {
        iftError("Invalid image type", "iftReadImageP6");
    }
    fclose(fp);
    return (img);
}
/* Reads an ASCII PGM (P2) image into a new 2D iftImage.
 *
 * The path is built printf-style from (format, ...). Pixel values are read
 * as whitespace-separated decimal integers; the header's maximum value is
 * parsed but not otherwise used.
 *
 * Returns a new iftImage (zsize == 1, dz == 0); fails via iftError() on any
 * I/O or format problem. */
iftImage *iftReadImageP2(const char *format, ...)
{
    iftImage *img = NULL;
    FILE *fp = NULL;
    char type[10];
    int p, v, xsize, ysize, zsize;
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];

    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    fp = fopen(filename, "r");
    if (fp == NULL) {
        iftError(MSG_FILE_OPEN_ERROR, "iftReadImageP2", filename);
    }
    /* Bug fix: "%s" was unbounded and could overflow type[10] on a corrupt
     * header; "%9s" limits the read to sizeof(type)-1 characters. */
    if (fscanf(fp, "%9s\n", type) != 1)
        iftError("Reading error", "iftReadImageP2");
    if (iftCompareStrings(type, "P2")) {
        iftSkipComments(fp);
        if (fscanf(fp, "%d %d\n", &xsize, &ysize) != 2)
            iftError("Reading error", "iftReadImageP2");
        zsize = 1;
        img = iftCreateImage(xsize, ysize, zsize);
        img->dz = 0.0;  /* 2D image */
        if (fscanf(fp, "%d", &v) != 1)
            iftError("Reading error", "iftReadImageP2");
        while (fgetc(fp) != '\n');
        for (p = 0; p < img->n; p++)
            if (fscanf(fp, "%d", &img->val[p]) != 1)
                iftError("Reading error", "iftReadImageP2");
    } else {
        iftError("Invalid image type", "iftReadImageP2");
    }
    fclose(fp);
    return (img);
}
/* Creates an iftImage header around an existing pixel buffer.
 *
 * The buffer 'val' is NOT copied: the returned image stores the pointer
 * directly, so the caller must not free it separately while the image is
 * alive. Grey-only (Cb/Cr/alpha are NULL), voxel sizes default to 1.0, and
 * the y/z lookup tables are built for xsize*ysize*zsize voxels.
 *
 * Returns the new image; fails via iftError() on allocation failure. */
iftImage *iftCreateImageFromBuffer(int xsize, int ysize, int zsize, int *val)
{
    iftImage *img = NULL;
    int y, z, xysize;

    img = (iftImage *) iftAlloc(1, sizeof(iftImage));
    if (img == NULL) {
        /* Bug fix: error messages previously reported "iftCreateImage",
         * pointing the user at the wrong function. */
        iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateImageFromBuffer");
    }
    img->val   = val;   /* adopt the caller's buffer, no copy */
    img->Cb    = img->Cr = NULL;
    img->alpha = NULL;
    img->xsize = xsize;
    img->ysize = ysize;
    img->zsize = zsize;
    img->dx    = 1.0;
    img->dy    = 1.0;
    img->dz    = 1.0;
    img->tby   = iftAllocIntArray(ysize);
    img->tbz   = iftAllocIntArray(zsize);
    img->n     = xsize * ysize * zsize;

    if (img->val == NULL || img->tbz == NULL || img->tby == NULL) {
        iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateImageFromBuffer");
    }

    /* tby[y] = offset of row y; tbz[z] = offset of slice z */
    img->tby[0] = 0;
    for (y = 1; y < ysize; y++)
        img->tby[y] = img->tby[y - 1] + xsize;
    img->tbz[0] = 0;
    xysize = xsize * ysize;
    for (z = 1; z < zsize; z++)
        img->tbz[z] = img->tbz[z - 1] + xysize;

    return (img);
}
/* Copies the content of 'src' into the pre-allocated image 'dest'.
 *
 * Both images must have identical domains (verified via
 * iftVerifyImageDomains). Copies voxel sizes, the grey values, the Cb/Cr
 * channels when src is colored (allocating them in dest if needed), and the
 * alpha channel when present. */
void iftCopyImageInplace(const iftImage *src, iftImage *dest)
{
    int p;

    iftVerifyImageDomains(src, dest, "iftCopyImageInplace");
    iftCopyVoxelSize(src, dest);

    for (p = 0; p < src->n; p++)
        dest->val[p] = src->val[p];

    if (src->Cb != NULL) {
        if (dest->Cb == NULL)
            dest->Cb = iftAllocUShortArray(src->n);
        if (dest->Cr == NULL)
            dest->Cr = iftAllocUShortArray(src->n);
        for (p = 0; p < src->n; p++) {
            dest->Cb[p] = src->Cb[p];
            dest->Cr[p] = src->Cr[p];
        }
    }

    /* Bug fix: the alpha channel was silently dropped by the copy even
     * though readers (e.g. PNG with GRAY_ALPHA/RGB_ALPHA) populate it. */
    if (src->alpha != NULL) {
        if (dest->alpha == NULL)
            dest->alpha = iftAllocUShortArray(src->n);
        for (p = 0; p < src->n; p++)
            dest->alpha[p] = src->alpha[p];
    }
}
/* Writes an image in the SCN format (text header + raw data).
 *
 * The path is built printf-style from (format, ...). The sample width (8, 16
 * or 32 bits) is chosen from the image's maximum value. If the image has
 * negative values it is shifted to start at 0 — NOTE: this mutates the
 * values of 'img' in place despite the const qualifier (the original
 * behavior, announced with iftWarning). */
void iftWriteImage(const iftImage *img, const char *format, ...)
{
    FILE *fp = NULL;
    int p;
    uchar *data8 = NULL;
    ushort *data16 = NULL;
    int *data32 = NULL;
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];

    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    int img_min_val = iftMinimumValue(img);
    int img_max_val = iftMaximumValue(img);

    if (img_min_val < 0) {
        char msg[200];
        sprintf(msg, "Shifting image values from [%d,%d] to [%d,%d] on the original image\n",
                img_min_val, img_max_val, 0, img_max_val - img_min_val);
        iftWarning(msg, "iftWriteImage");
        for (p = 0; p < img->n; p++)
            img->val[p] = img->val[p] - img_min_val;
        img_max_val = img_max_val - img_min_val;
    }

    fp = fopen(filename, "wb");
    if (fp == NULL)
        iftError("Cannot open file: \"%s\"", "iftWriteImage", filename);

    fprintf(fp, "SCN\n");
    fprintf(fp, "%d %d %d\n", img->xsize, img->ysize, img->zsize);
    fprintf(fp, "%f %f %f\n", img->dx, img->dy, img->dz);

    if (img_max_val < 256) {
        fprintf(fp, "%d\n", 8);
        data8 = iftAllocUCharArray(img->n);
        for (p = 0; p < img->n; p++)
            data8[p] = (uchar) img->val[p];
        fwrite(data8, sizeof(uchar), img->n, fp);
        iftFree(data8);
    } else if (img_max_val < 65536) {
        fprintf(fp, "%d\n", 16);
        data16 = iftAllocUShortArray(img->n);
        for (p = 0; p < img->n; p++)
            data16[p] = (ushort) img->val[p];
        fwrite(data16, sizeof(ushort), img->n, fp);
        iftFree(data16);
    } else if (img_max_val < IFT_INFINITY_INT) {
        fprintf(fp, "%d\n", 32);
        data32 = iftAllocIntArray(img->n);
        for (p = 0; p < img->n; p++)
            data32[p] = img->val[p];
        fwrite(data32, sizeof(int), img->n, fp);
        iftFree(data32);
    } else {
        /* Bug fix: this case previously fell through silently, leaving a
         * truncated file (header with no raster) on disk. */
        fclose(fp);
        iftError("Image maximum value is out of the supported range",
                 "iftWriteImage");
    }
    fclose(fp);
}
/* Writes a grey image as binary PGM (P5).
 *
 * The path is built printf-style from (format, ...). Values up to 255 are
 * written as single bytes; values up to 65535 are written big-endian (high
 * byte first) as the PNM spec requires. Negative values or values >= 65536
 * cause an iftError. */
void iftWriteImageP5(const iftImage *img, const char *format, ...)
{
    FILE *fp = NULL;
    int p, hi, lo;
    uchar *data8 = NULL;
    ushort *data16 = NULL;
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];

    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    fp = fopen(filename, "wb");
    if (fp == NULL)
        iftError(MSG_FILE_OPEN_ERROR, "iftWriteImageP5", filename);

    fprintf(fp, "P5\n");
    fprintf(fp, "%d %d\n", img->xsize, img->ysize);

    int img_max_val = iftMaximumValue(img);
    int img_min_val = iftMinimumValue(img);

    if ((img_max_val < 256) && (img_min_val >= 0)) {
        fprintf(fp, "%d\n", 255);
        data8 = iftAllocUCharArray(img->n);
        for (p = 0; p < img->n; p++)
            data8[p] = (uchar) img->val[p];
        fwrite(data8, sizeof(uchar), img->n, fp);
        iftFree(data8);
    } else if (img_max_val < 65536) {
        fprintf(fp, "%d\n", 65535);
        data16 = iftAllocUShortArray(img->n);
        for (p = 0; p < img->n; p++)
            data16[p] = (ushort) img->val[p];
        {
/* split a 16-bit sample into its big-endian byte pair */
#define HI(num) (((num) & 0x0000FF00) >> 8)
#define LO(num) ((num) & 0x000000FF)
            for (p = 0; p < img->n; p++) {
                hi = HI(data16[p]);
                lo = LO(data16[p]);
                fputc(hi, fp);
                fputc(lo, fp);
            }
/* Bug fix: without #undef these helper macros leaked out of the function
 * and could silently capture any later HI/LO identifier in this file. */
#undef HI
#undef LO
        }
        iftFree(data16);
    } else {
        char msg[200];
        sprintf(msg, "Cannot write image as P5 (%d/%d)", img_max_val, img_min_val);
        iftError(msg, "iftWriteImageP5");
    }
    fclose(fp);
}
/* Writes a color image as binary PPM (P6).
 *
 * The path is built printf-style from (format, ...). The stored YCbCr
 * channels are converted back to RGB: with iftYCbCrtoRGB for 8-bit images,
 * and with the BT.2020 inverse (big-endian 16-bit samples) for deeper
 * images. Fails via iftError() for non-color images, negative values, or
 * values >= 65536. */
void iftWriteImageP6(const iftImage *img, const char *format, ...)
{
    FILE *fp = NULL;
    int p;
    ushort rgb16[3];
    iftColor YCbCr, RGB;

    if (!iftIsColorImage(img))
        iftError("Image is not colored", "iftWriteImageP6");

    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];
    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    /* Bug fix: P6 carries a binary raster, so the file must be opened in
     * binary mode ("wb" instead of "w") — the P5 writer already does this;
     * text mode corrupts the output on platforms that translate '\n'. */
    fp = fopen(filename, "wb");
    if (fp == NULL)
        iftError(MSG_FILE_OPEN_ERROR, "iftWriteImageP6", filename);

    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n", img->xsize, img->ysize);

    int img_max_val = iftMaximumValue(img);
    int img_min_val = iftMinimumValue(img);

    if (img_min_val < 0) {
        iftError("Cannot write image as P6", "iftWriteImageP6");
    }
    if (img_max_val < 256) {
        /* 8-bit raster: three bytes (R,G,B) per pixel */
        fprintf(fp, "%d\n", 255);
        for (p = 0; p < img->n; p++) {
            YCbCr.val[0] = img->val[p];
            YCbCr.val[1] = img->Cb[p];
            YCbCr.val[2] = img->Cr[p];
            RGB = iftYCbCrtoRGB(YCbCr, 255);
            fputc(((uchar) RGB.val[0]), fp);
            fputc(((uchar) RGB.val[1]), fp);
            fputc(((uchar) RGB.val[2]), fp);
        }
    } else if (img_max_val < 65536) {
        int rgbBitDepth = ceil(iftLog(img_max_val, 2));
        fprintf(fp, "%d\n", (1 << rgbBitDepth) - 1);
        for (p = 0; p < img->n; p++) {
            YCbCr.val[0] = img->val[p];
            YCbCr.val[1] = img->Cb[p];
            YCbCr.val[2] = img->Cr[p];
            RGB = iftYCbCrBT2020toRGB(YCbCr, rgbBitDepth, rgbBitDepth);
            // the PPM format specifies 2-byte integers as big endian,
            // so we need to swap the bytes if the architecture is little endian
            rgb16[0] = ((RGB.val[0] & 0xff) << 8) | ((ushort) RGB.val[0] >> 8);
            rgb16[1] = ((RGB.val[1] & 0xff) << 8) | ((ushort) RGB.val[1] >> 8);
            rgb16[2] = ((RGB.val[2] & 0xff) << 8) | ((ushort) RGB.val[2] >> 8);
            // write 6 bytes for each image pixel
            if (fwrite(rgb16, 2, 3, fp) != 3) {
                iftError("Cannot write 16-bit image as P6", "iftWriteImageP6");
            }
        }
    } else {
        iftError("Cannot write image as P6", "iftWriteImageP6");
    }
    fclose(fp);
}
/* Writes a grey image as ASCII PGM (P2).
 *
 * The path is built printf-style from (format, ...). The header's maximum
 * value is derived from the image depth via iftMaxImageRange(); pixel values
 * are emitted as space-separated decimals with a newline at the end of each
 * image row. Fails via iftError() if the file cannot be opened. */
void iftWriteImageP2(const iftImage *img, const char *format, ...)
{
    va_list ap;
    char path[IFT_STR_DEFAULT_SIZE];
    int depth = iftImageDepth(img);

    va_start(ap, format);
    vsprintf(path, format, ap);
    va_end(ap);

    FILE *out = fopen(path, "w");
    if (out == NULL)
        iftError(MSG_FILE_OPEN_ERROR, "iftWriteImageP2", path);

    /* header: magic, dimensions, maximum sample value */
    fprintf(out, "P2\n");
    fprintf(out, "%d %d\n", img->xsize, img->ysize);
    int img_max_val = iftMaxImageRange(depth);
    fprintf(out, "%d\n", img_max_val);

    /* raster: one line of text per image row */
    for (int p = 0; p < img->n; p++) {
        fprintf(out, "%d ", img->val[p]);
        if (iftGetXCoord(img, p) == (img->xsize - 1))
            fprintf(out, "\n");
    }
    fclose(out);
}
#if IFT_LIBPNG
/* Writes pre-packed PNG scanlines to disk.
 *
 * 'row_pointers' holds one buffer per scanline, already laid out for the
 * given bit depth and libpng color type (filled by iftWriteImagePNG).
 * The function takes ownership of the buffers: every row and the pointer
 * array itself are freed before returning. Errors reported by libpng land
 * in the setjmp handlers below, which forward to iftError. */
void iftWritePngImageAux(const char *file_name, png_bytep *row_pointers, int width, int height, int bit_depth, int color_type)
{
/* create file */
FILE *fp = fopen(file_name, "wb");
if (!fp)
iftError("Internal Error: File %s could not be opened for writing", "iftWritePngImageAux", file_name);
/* initialize stuff */
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
iftError("Internal Error: png_create_write_struct failed", "iftWriteImagePNG");
png_infop info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr)
iftError("Internal Error: png_create_info_struct failed", "iftWriteImagePNG");
/* libpng error protocol: on failure libpng longjmp()s back here */
if (setjmp(png_jmpbuf(png_ptr)))
iftError("Internal Error: Error during init_io", "iftWriteImagePNG");
png_init_io(png_ptr, fp);
/* write header */
if (setjmp(png_jmpbuf(png_ptr)))
iftError("Internal Error: Error during writing header", "iftWriteImagePNG");
png_set_IHDR(png_ptr, info_ptr, width, height,
bit_depth, color_type, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png_ptr, info_ptr);
/* write bytes */
if (setjmp(png_jmpbuf(png_ptr)))
iftError("Internal Error: Error during writing bytes", "iftWriteImagePNG");
png_write_image(png_ptr, row_pointers);
/* end write */
if (setjmp(png_jmpbuf(png_ptr)))
iftError("Internal Error: Error during end of write", "iftWriteImagePNG");
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
/* cleanup heap allocation */
for (int y=0; y<height; y++)
iftFree(row_pointers[y]);
iftFree(row_pointers);
fclose(fp);
}
#endif
/* Writes an iftImage as a PNG file (path built printf-style from format/...).
 *
 * The libpng color type is chosen from the image's channels:
 *   grey              -> GRAY          grey + alpha -> GRAY_ALPHA
 *   YCbCr (Cb/Cr set) -> RGB           color + alpha -> RGB_ALPHA
 * The stored depth is 8 or 16 bits (rounded up from iftImageDepth); 16-bit
 * samples are packed big-endian as PNG requires. Color images are converted
 * back to RGB with the BT.2020 inverse transform. Scanline buffers are
 * handed to iftWritePngImageAux, which writes the file and frees them. */
void iftWriteImagePNG(const iftImage* img, const char* format, ...)
{
#if IFT_LIBPNG
png_bytep *row_pointers;
int width, height, depth, byteshift;
va_list args;
char filename[IFT_STR_DEFAULT_SIZE];
va_start(args, format);
vsprintf(filename, format, args);
va_end(args);
width = img->xsize;
height = img->ysize;
png_byte color_type;
/* PNG supports only 8- or 16-bit samples: round the image depth up */
depth = iftImageDepth(img);
if(depth<=8) {
depth = 8;
} else {
depth = 16;
}
byteshift = depth/8;
//int offset = depth==16?1:0;//to read the second byte first in cases of 16bit images
size_t numberOfChannels=1;
if(iftIsColorImage(img)){
if(img->alpha == NULL){
numberOfChannels = 3;//RGB
color_type = PNG_COLOR_TYPE_RGB;
}else{
numberOfChannels = 4;//RGB_ALPHA
color_type = PNG_COLOR_TYPE_RGB_ALPHA;
}
}else{
if(img->alpha == NULL){
numberOfChannels = 1;//GRAY
color_type = PNG_COLOR_TYPE_GRAY;
}else{
numberOfChannels = 2;//GRAY_ALPHA
color_type = PNG_COLOR_TYPE_GRAY_ALPHA;
}
}
//size_t pixel_size = (iftIsColorImage(img)?3:1 ) * byteshift;
/* one packed scanline buffer per image row; freed by iftWritePngImageAux */
row_pointers = (png_bytep*) iftAlloc(height, sizeof(png_bytep));
for (int y=0; y<height; y++)
row_pointers[y] = (png_byte*) iftAlloc(width, numberOfChannels*byteshift);
if(color_type == PNG_COLOR_TYPE_GRAY){
int p = 0;
for (int y = 0; y < height; ++y) {
png_byte* row = row_pointers[y];
for (int x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberOfChannels*byteshift]);
ptr[0] = img->val[p] & 0xFF;//get first byte
if(depth==16) {//in 16bit image, we should store as big endian
ptr[1] = ptr[0];
ptr[0] = (img->val[p]>>8) & 0xFF;//get second byte
}
p++;
}
}
}else if(color_type == PNG_COLOR_TYPE_GRAY_ALPHA){
int p = 0;
for (int y = 0; y < height; ++y) {
png_byte* row = row_pointers[y];
for (int x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberOfChannels*byteshift]);
if(depth==8){
ptr[0] = img->val[p] & 0xFF;//get first byte
ptr[1] = img->alpha[p] & 0xFF;//get second byte
}
if(depth==16) {//in 16bit image, we should store as big endian
ptr[0] = img->val[p]>>8;//get first byte
ptr[1] = img->val[p] & 0xFF;//get second byte
ptr[2] = img->alpha[p]>>8;//get first byte;
ptr[3] = img->alpha[p] & 0xFF;;//get second byte
}
p++;
}
}
}else if(color_type == PNG_COLOR_TYPE_RGB){
iftColor rgb, ycbcr;
int p = 0;
for (int y = 0; y < height; ++y) {
png_byte* row = row_pointers[y];
for (int x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberOfChannels*byteshift]);
ycbcr.val[0] = img->val[p];
ycbcr.val[1] = img->Cb[p];
ycbcr.val[2] = img->Cr[p];
rgb = iftYCbCrBT2020toRGB(ycbcr, depth, depth);
ptr[0*byteshift] = rgb.val[0] & 0xFF;//get first byte
ptr[1*byteshift] = rgb.val[1] & 0xFF;
ptr[2*byteshift] = rgb.val[2] & 0xFF;
if(depth==16) {//in 16bit image, we should store as big endian
ptr[(0*byteshift)+1] = ptr[0*byteshift];
ptr[(1*byteshift)+1] = ptr[1*byteshift];
ptr[(2*byteshift)+1] = ptr[2*byteshift];
ptr[0*byteshift] = ((rgb.val[0]>>8) & 0xFF);//get second byte
ptr[1*byteshift] = ((rgb.val[1]>>8) & 0xFF);
ptr[2*byteshift] = ((rgb.val[2]>>8) & 0xFF);
}
p++;
}
}
}else if(color_type == PNG_COLOR_TYPE_RGB_ALPHA){
iftColor rgb, ycbcr;
int p = 0;
for (int y = 0; y < height; ++y) {
png_byte* row = row_pointers[y];
for (int x=0; x<width; x++) {
png_byte* ptr = &(row[x*numberOfChannels*byteshift]);
ycbcr.val[0] = img->val[p];
ycbcr.val[1] = img->Cb[p];
ycbcr.val[2] = img->Cr[p];
ushort alpha = img->alpha[p];
rgb = iftYCbCrBT2020toRGB(ycbcr, depth, depth);
ptr[0*byteshift] = rgb.val[0] & 0xFF;//get first byte
ptr[1*byteshift] = rgb.val[1] & 0xFF;
ptr[2*byteshift] = rgb.val[2] & 0xFF;
ptr[3*byteshift] = alpha & 0xFF;
if(depth==16) {//in 16bit image, we should store as big endian
ptr[(0*byteshift)+1] = ptr[0*byteshift];
ptr[(1*byteshift)+1] = ptr[1*byteshift];
ptr[(2*byteshift)+1] = ptr[2*byteshift];
ptr[(3*byteshift)+1] = ptr[(3*byteshift)];
ptr[0*byteshift] = ((rgb.val[0]>>8) & 0xFF);//get second byte
ptr[1*byteshift] = ((rgb.val[1]>>8) & 0xFF);
ptr[2*byteshift] = ((rgb.val[2]>>8) & 0xFF);
ptr[(3*byteshift)] = ((alpha>>8) & 0xFF);
}
p++;
}
}
}else{
/* NOTE(review): runtime message has typos ("Unknwon color scape") —
 * should read "Unknown color space"; left untouched here because it is
 * program output, not a comment. */
iftError("Unknwon color scape", "iftWriteImagePNG");
};
iftWritePngImageAux(filename, row_pointers, width, height, depth, color_type);
#else
iftError("LibPNG support was not enabled!","iftWriteImagePNG");
#endif
}
/* Writes a color image as a JPEG file (path built printf-style from
 * format/...), based on libjpeg's example.c.
 *
 * The image's Y/Cb/Cr channels are written directly (in_color_space =
 * JCS_YCbCr, 3 components) at quality 100, one scanline per
 * jpeg_write_scanlines call.
 *
 * NOTE(review): samples are truncated to unsigned char, so images deeper
 * than 8 bits lose precision even though data_precision is set from
 * iftImageDepth — confirm intended. Fails via iftError() when libjpeg
 * support is disabled; exits on fopen failure (original behavior). */
void iftWriteImageJPEG(const iftImage* img, const char* format, ...)
{
#if IFT_LIBJPEG
    va_list args;
    char filename[IFT_STR_DEFAULT_SIZE];
    va_start(args, format);
    vsprintf(filename, format, args);
    va_end(args);

    //code based on external/libjpeg/source/example.c
    /* Compression state and error handler; jerr must outlive cinfo. */
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    FILE * outfile;      /* target file */
    JSAMPARRAY buffer;   /* one-scanline output buffer */

    /* Step 1: allocate and initialize the JPEG compression object.
     * The error handler is set up first in case initialization fails. */
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);

    /* Step 2: specify the data destination (a stdio file, opened in
     * binary mode as libjpeg requires). */
    if ((outfile = fopen(filename, "wb")) == NULL) {
        fprintf(stderr, "can't open %s\n", filename);
        exit(1);
    }
    jpeg_stdio_dest(&cinfo, outfile);

    /* Step 3: describe the input image. */
    cinfo.image_width = img->xsize; /* image width and height, in pixels */
    cinfo.image_height = img->ysize;
    cinfo.data_precision = iftImageDepth(img);
    cinfo.input_components = 3;
    cinfo.in_color_space = JCS_YCbCr;
    cinfo.jpeg_color_space = JCS_YCbCr;

    /* Defaults depend on in_color_space, so it must be set first. */
    jpeg_set_defaults(&cinfo);
    int quality = 100;
    jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */);

    /* Step 4: start the compressor; TRUE => complete interchange-JPEG. */
    jpeg_start_compress(&cinfo, TRUE);

    /* Step 5: write scanlines, using cinfo.next_scanline as the counter. */
    int row_stride = cinfo.image_width * cinfo.num_components;
    buffer = (*cinfo.mem->alloc_sarray)
            ((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
    unsigned int imageRow = 0;
    while (cinfo.next_scanline < cinfo.image_height) {
        unsigned int imageCol = 0;
        for (unsigned int i = 0; i < (unsigned int)cinfo.image_width; i++) {
            int shift = i*cinfo.num_components;
            buffer[0][(shift+0)] = (unsigned char)iftImgVal(img,imageCol,imageRow,0);
            buffer[0][(shift+1)] = (unsigned char)iftImgCb(img,imageCol,imageRow,0);
            buffer[0][(shift+2)] = (unsigned char)iftImgCr(img,imageCol,imageRow,0);
            imageCol++;
        }
        imageRow++;
        (void) jpeg_write_scanlines(&cinfo, buffer, 1);
    }

    /* Step 6: finish compression, then close the output file. */
    jpeg_finish_compress(&cinfo);
    fclose(outfile);

    /* Step 7: release the JPEG compression object (frees its memory). */
    jpeg_destroy_compress(&cinfo);
#else
    /* Bug fix: the message previously said "LibPNG support was not
     * enabled!" — this is the JPEG writer. */
    iftError("LibJPEG support was not enabled!","iftWriteImageJPEG");
#endif
}
/* Returns the maximum voxel value inside the (inclusive) bounding box bb.
 * Aborts with an error if the image is NULL or the box leaves the image domain. */
int iftMaximumValueInRegion(const iftImage *img, iftBoundingBox bb)
{
    if (img == NULL)
        iftError("Image is NULL", "iftMaximumValueInRegion");
    if (!iftValidVoxel(img, bb.begin))
        iftError("Beginning voxel (%d, %d, %d) from Region (Bound. Box) is not in the Image Domain\n" \
             "Img (xsize, ysize, zsize): (%d, %d, %d)", "iftMaximumValueInRegion",
                 bb.begin.x, bb.begin.y, bb.begin.z, img->xsize, img->ysize, img->zsize);
    if (!iftValidVoxel(img, bb.end))
        iftError("Ending voxel (%d, %d, %d) from Region (Bound. Box) is not in the Image Domain\n" \
             "Img (xsize, ysize, zsize): (%d, %d, %d)", "iftMaximumValueInRegion",
                 bb.end.x, bb.end.y, bb.end.z, img->xsize, img->ysize, img->zsize);
    int best = IFT_INFINITY_INT_NEG;
    iftVoxel u;
    for (u.z = bb.begin.z; u.z <= bb.end.z; u.z++)
        for (u.y = bb.begin.y; u.y <= bb.end.y; u.y++)
            for (u.x = bb.begin.x; u.x <= bb.end.x; u.x++) {
                int idx = iftGetVoxelIndex(img, u);
                if (img->val[idx] > best)
                    best = img->val[idx];
            }
    return best;
}
/* Fills every voxel of img with the given value. */
void iftSetImage(iftImage *img, int value)
{
    for (int idx = 0; idx < img->n; idx++) {
        img->val[idx] = value;
    }
}
/* Sets the alpha channel of every voxel to value, allocating the
 * channel first if the image does not have one yet. */
void iftSetAlpha(iftImage *img, ushort value)
{
    if (img->alpha == NULL)
        img->alpha = iftAllocUShortArray(img->n);
    for (int idx = 0; idx < img->n; idx++)
        img->alpha[idx] = value;
}
/* Sets both chrominance channels (Cb and Cr) of every voxel to value.
 * Grayscale images get the channels allocated on demand. */
void iftSetCbCr(iftImage *img, ushort value)
{
    if (!iftIsColorImage(img)) {
        img->Cb = iftAllocUShortArray(img->n);
        img->Cr = iftAllocUShortArray(img->n);
    }
    for (int idx = 0; idx < img->n; idx++) {
        img->Cb[idx] = value;
        img->Cr[idx] = value;
    }
}
/* Aborts (via iftError, reporting on behalf of `function`) unless both images
 * are non-NULL and share the same (xsize, ysize, zsize) domain.
 *
 * Fix: the original built the error message arguments by dereferencing img1/img2
 * even when one of them was NULL — the varargs are evaluated before iftError runs,
 * so a NULL image crashed with a segfault instead of a clean error message. */
void iftVerifyImageDomains(const iftImage *img1, const iftImage *img2, const char *function)
{
    if ((img1 == NULL) || (img2 == NULL))
        iftError("At least one of the images is NULL", function);
    if ((img1->xsize != img2->xsize) || (img1->ysize != img2->ysize) || (img1->zsize != img2->zsize)) {
        iftError("Images with different domains:\n" \
                "img1 (xsize, ysize, zsize): (%d, %d, %d)\n" \
                "img2 (xsize, ysize, zsize): (%d, %d, %d)\n",
                 function,
                 img1->xsize, img1->ysize, img1->zsize, img2->xsize, img2->ysize, img2->zsize);
    }
}
/* Returns the bit depth needed to represent the image's value range
 * (log2 of the normalized range size). */
uchar iftImageDepth(const iftImage *img)
{
    int lo, hi;
    iftMinMaxValues(img, &lo, &hi);
    /* negative values widen the range that must be representable */
    long range = (lo >= 0) ? iftNormalizationValue(hi) + 1
                           : iftNormalizationValue(hi - lo) + 1;
    return (uchar) iftLog(range, 2);
}
/* Computes the minimum and maximum voxel values of img in a single pass,
 * storing them in *min and *max. */
void iftMinMaxValues(const iftImage *img, int *min, int *max)
{
    int lo = img->val[0];
    int hi = img->val[0];
    for (int idx = 1; idx < img->n; idx++) {
        int v = img->val[idx];
        if (v < lo)
            lo = v;
        else if (v > hi)
            hi = v;
    }
    *min = lo;
    *max = hi;
}
/* Creates a new (empty) image with the same domain, voxel size, and
 * color/grayscale nature as src. Returns NULL when src is NULL. */
iftImage *iftCreateImageFromImage(const iftImage *src)
{
    if (src == NULL)
        return NULL;
    iftImage *out;
    if (iftIsColorImage(src))
        out = iftCreateColorImage(src->xsize, src->ysize, src->zsize, iftImageDepth(src));
    else
        out = iftCreateImage(src->xsize, src->ysize, src->zsize);
    iftCopyVoxelSize(src, out);
    return out;
}
/* Creates a color image of the given domain; the chrominance channels are
 * initialized to the neutral (mid-range) value for the given bit depth. */
iftImage *iftCreateColorImage(int xsize, int ysize, int zsize, int depth)
{
    iftImage *img = iftCreateImage(xsize, ysize, zsize);
    iftSetCbCr(img, (iftMaxImageRange(depth) + 1) / 2);
    return img;
}
/* Copies a 2D slice into the XY plane of img at depth zcoord.
 *
 * Fix: the original OpenMP version declared p,q private — so each thread read an
 * UNINITIALIZED p — and raced on the shared voxel u (u.x/u.y written by all
 * threads). The slice index is now derived from (x, y) so every iteration is
 * independent and safe to parallelize. */
void iftPutXYSlice(iftImage *img, const iftImage *slice, int zcoord)
{
    if ((zcoord < 0) || (zcoord >= img->zsize))
        iftError("Invalid z coordinate", "iftPutXYSlice");
    if ((img->ysize != slice->ysize) || (img->xsize != slice->xsize))
        iftError("Image and slice are incompatibles", "iftPutXYSlice");
#if IFT_OMP
#pragma omp parallel for
#endif
    for (int y = 0; y < img->ysize; y++)
        for (int x = 0; x < img->xsize; x++)
        {
            iftVoxel u;
            u.x = x; u.y = y; u.z = zcoord;
            int q = iftGetVoxelIndex(img, u);
            int p = y * slice->xsize + x;   /* row-major index inside the slice */
            img->val[q] = slice->val[p];
            if (iftIsColorImage(img))
            {
                img->Cb[q] = slice->Cb[p];
                img->Cr[q] = slice->Cr[p];
            }
        }
}
/* Extracts the XY slice of img at depth zcoord as a new single-slice image
 * (color channels are copied when present; voxel size is propagated).
 *
 * Fix: the original OpenMP version declared p,q private — each thread read an
 * UNINITIALIZED q — and raced on the shared voxel u. The slice index is now
 * derived from (x, y) so iterations are independent. */
iftImage *iftGetXYSlice(const iftImage *img, int zcoord)
{
    iftImage *slice;
    if ((zcoord < 0) || (zcoord >= img->zsize))
        iftError("Invalid z coordinate", "iftGetXYSlice");
    if (iftIsColorImage(img))
        slice = iftCreateColorImage(img->xsize, img->ysize, 1, iftImageDepth(img));
    else
        slice = iftCreateImage(img->xsize, img->ysize, 1);
#if IFT_OMP
#pragma omp parallel for
#endif
    for (int y = 0; y < img->ysize; y++)
        for (int x = 0; x < img->xsize; x++)
        {
            iftVoxel u;
            u.x = x; u.y = y; u.z = zcoord;
            int p = iftGetVoxelIndex(img, u);
            int q = y * img->xsize + x;   /* row-major index inside the slice */
            slice->val[q] = img->val[p];
            if (iftIsColorImage(img))
            {
                slice->Cb[q] = img->Cb[p];
                slice->Cr[q] = img->Cr[p];
            }
        }
    iftCopyVoxelSize(img, slice);
    return slice;
}
/* Expands a bit map into a binary image of the given domain.
 * The bit map must have exactly xsize*ysize*zsize entries. */
iftImage *iftBMapToBinImage(const iftBMap *bmap, int xsize, int ysize, int zsize) {
    if (bmap == NULL)
        iftError("Bin Map is NULL", "iftBMapToBinImage");
    int n_spels = xsize * ysize * zsize;
    if (bmap->n != n_spels)
        iftError("Image Domain is != of the Bin Map Size\n" \
             "Img Domain: (%d, %d, %d) = %d spels\nBin Map: %d elems", "iftBMapToBinImage",
                 xsize, ysize, zsize, n_spels, bmap->n);
    iftImage *bin = iftCreateImage(xsize, ysize, zsize);
    for (int idx = 0; idx < bmap->n; idx++)
        bin->val[idx] = iftBMapValue(bmap, idx);
    return bin;
}
/* Packs a binary image into a bit map: any non-zero voxel sets its bit. */
iftBMap *iftBinImageToBMap(const iftImage *bin_img) {
    if (bin_img == NULL)
        iftError("Bin Image is NULL", "iftBinImageToBMap");
    iftBMap *bmap = iftCreateBMap(bin_img->n);
    for (int p = 0; p < bin_img->n; p++) {
        if (bin_img->val[p] != 0)
            iftBMapSet1(bmap, p);
    }
    return bmap;
}
// ---------- iftImage.c end
// ---------- iftMatrix.c start
iftMatrix *iftCreateMatrix(int ncols, int nrows)
{
iftMatrix *M = (iftMatrix *) iftAlloc(1, sizeof(iftMatrix));
M->ncols = ncols;
M->nrows = nrows;
M->tbrow = (long*)iftAlloc(nrows,sizeof(long));
//M->tbrow = iftAllocIntArray(nrows);
for (long r = 0; r < (long)nrows; r++) {
M->tbrow[r] = r * ncols;
}
M->n = (long) ncols * (long) nrows;
M->allocated = true;
M->val = iftAllocFloatArray(M->n);
return (M);
}
/* Returns a deep copy of matrix A (same shape, values duplicated). */
iftMatrix *iftCopyMatrix(const iftMatrix *A)
{
    iftMatrix *B = iftCreateMatrix(A->ncols, A->nrows);
    for (int idx = 0; idx < A->n; idx++) {
        B->val[idx] = A->val[idx];
    }
    return B;
}
/* Releases a matrix and sets the caller's pointer to NULL.
 * The value buffer is freed only when this matrix owns it (allocated flag). */
void iftDestroyMatrix(iftMatrix **M)
{
    if (M == NULL)
        return;
    iftMatrix *mat = *M;
    if (mat != NULL) {
        if (mat->allocated && (mat->val != NULL))
            iftFree(mat->val);
        iftFree(mat->tbrow);
        iftFree(mat);
    }
    *M = NULL;
}
// ---------- iftMatrix.c end
// ---------- iftMImage.c start
/* Allocates a multi-band image with xsize*ysize*zsize voxels and nbands bands.
 * Pixel values live in one (n x m) matrix; val[p] points at row p so that
 * val[p][b] addresses band b of voxel p. tby/tbz are lookup tables for fast
 * voxel-coordinate-to-index conversion. Voxel size defaults to 1.0 per axis. */
iftMImage * iftCreateMImage(int xsize,int ysize,int zsize, int nbands)
{
  iftMImage *img=NULL;
  int i,y,z,xysize;
  img = (iftMImage *) iftAlloc(1,sizeof(iftMImage));
  if (img == NULL){
    iftError(MSG_MEMORY_ALLOC_ERROR, "iftCreateMImage");
  }
  img->n = xsize*ysize*zsize;
  img->m = nbands;
  /* backing storage: one matrix row per voxel, one column per band */
  img->data = iftCreateMatrix(img->m, img->n);
  img->val = iftAlloc(img->n, sizeof *img->val);
  for (i = 0; i < img->n; i++)
    img->val[i] = iftMatrixRowPointer(img->data, i);
  img->xsize = xsize;
  img->ysize = ysize;
  img->zsize = zsize;
  img->dx = 1.0;
  img->dy = 1.0;
  img->dz = 1.0;
  /* tby[y] = y*xsize and tbz[z] = z*xsize*ysize, built incrementally */
  img->tby = iftAllocIntArray(ysize);
  img->tbz = iftAllocIntArray(zsize);
  img->tby[0]=0;
  for (y=1; y < ysize; y++)
    img->tby[y]=img->tby[y-1] + xsize;
  img->tbz[0]=0; xysize = xsize*ysize;
  for (z=1; z < zsize; z++)
    img->tbz[z]=img->tbz[z-1] + xysize;
  return(img);
}
/* Releases a multi-band image (row-pointer array, data matrix, lookup
 * tables) and sets the caller's pointer to NULL. */
void iftDestroyMImage(iftMImage **img)
{
    iftMImage *mimg = *img;
    if (mimg == NULL)
        return;
    if (mimg->val != NULL)
        iftFree(mimg->val);
    if (mimg->data != NULL)
        iftDestroyMatrix(&mimg->data);
    if (mimg->tby != NULL)
        iftFree(mimg->tby);
    if (mimg->tbz != NULL)
        iftFree(mimg->tbz);
    iftFree(mimg);
    *img = NULL;
}
/* Converts an integer image into a multi-band float image in the requested
 * color space. Color spaces needing conversion go through YCbCr -> RGB first;
 * the *Norm_* variants divide by the image's normalization value (max range).
 * Output has 3 bands, except GRAY/GRAYNorm which have 1. Voxel sizes are
 * propagated from the input. Aborts on an unknown color space. */
iftMImage * iftImageToMImage(const iftImage *img1, char color_space)
{
  iftMImage *img2=NULL;
  int normalization_value = iftNormalizationValue(iftMaximumValue(img1));
  switch (color_space) {
    /* raw YCbCr channels copied as floats */
    case YCbCr_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        img2->val[p][0]=((float)img1->val[p]);
        img2->val[p][1]=((float)img1->Cb[p]);
        img2->val[p][2]=((float)img1->Cr[p]);
      }
      break;
    /* YCbCr scaled to [0,1] */
    case YCbCrNorm_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        img2->val[p][0]=((float)img1->val[p])/(float)normalization_value;
        img2->val[p][1]=((float)img1->Cb[p])/(float)normalization_value;
        img2->val[p][2]=((float)img1->Cr[p])/(float)normalization_value;
      }
      break;
    /* CIE Lab via YCbCr -> RGB -> Lab */
    case LAB_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        iftFColor Lab;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        Lab = iftRGBtoLab(RGB,normalization_value);
        img2->val[p][0]=Lab.val[0];
        img2->val[p][1]=Lab.val[1];
        img2->val[p][2]=Lab.val[2];
      }
      break;
    /* normalized Lab (first normalization scheme) */
    case LABNorm_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        iftFColor Lab;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        Lab = iftRGBtoLabNorm(RGB,normalization_value);
        img2->val[p][0]=Lab.val[0];
        img2->val[p][1]=Lab.val[1];
        img2->val[p][2]=Lab.val[2];
      }
      break;
    /* normalized Lab (second normalization scheme) */
    case LABNorm2_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        iftFColor LabNorm;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        LabNorm = iftRGBtoLabNorm2(RGB,normalization_value);
        img2->val[p][0]=LabNorm.val[0];
        img2->val[p][1]=LabNorm.val[1];
        img2->val[p][2]=LabNorm.val[2];
      }
      break;
    /* plain RGB */
    case RGB_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        img2->val[p][0]=((float)RGB.val[0]);
        img2->val[p][1]=((float)RGB.val[1]);
        img2->val[p][2]=((float)RGB.val[2]);
      }
      break;
    /* RGB scaled to [0,1] */
    case RGBNorm_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        img2->val[p][0]=((float)RGB.val[0])/(float)normalization_value;
        img2->val[p][1]=((float)RGB.val[1])/(float)normalization_value;
        img2->val[p][2]=((float)RGB.val[2])/(float)normalization_value;
      }
      break;
    /* single-band luminance */
    case GRAY_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,1);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        img2->val[p][0]=((float)img1->val[p]);
      }
      break;
    /* single-band luminance scaled to [0,1] */
    case GRAYNorm_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,1);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        img2->val[p][0]=((float)img1->val[p])/(float)normalization_value;
      }
      break;
    /* normalized YCbCr with fixed per-channel weights */
    case WEIGHTED_YCbCr_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        img2->val[p][0]=(0.2/2.2)*((float)img1->val[p]/(float)normalization_value);
        img2->val[p][1]=(1.0/2.2)*((float)img1->Cb[p]/(float)normalization_value);
        img2->val[p][2]=(1.0/2.2)*((float)img1->Cr[p]/(float)normalization_value);
      }
      break;
    /* HSV via YCbCr -> RGB -> HSV */
    case HSV_CSPACE:
      img2=iftCreateMImage(img1->xsize,img1->ysize,img1->zsize,3);
#if IFT_OMP
#pragma omp parallel for shared(img1, img2, normalization_value)
#endif
      for (int p=0; p < img2->n; p++) {
        iftColor YCbCr,RGB;
        iftColor HSV;
        YCbCr.val[0] = img1->val[p];
        YCbCr.val[1] = img1->Cb[p];
        YCbCr.val[2] = img1->Cr[p];
        RGB = iftYCbCrtoRGB(YCbCr,normalization_value);
        HSV = iftRGBtoHSV(RGB,normalization_value);
        img2->val[p][0]=HSV.val[0];
        img2->val[p][1]=HSV.val[1];
        img2->val[p][2]=HSV.val[2];
      }
      break;
    default:
      iftError("Invalid color space (see options in iftColor.h)", "iftImageToMImage");
  }
  img2->dx = img1->dx;
  img2->dy = img1->dy;
  img2->dz = img1->dz;
  return(img2);
}
/* Linearly rescales one band of a multi-band image into an integer image in
 * [0, Imax]. A constant band (max == min) leaves the output as created by
 * iftCreateImage. Voxel sizes are propagated.
 *
 * Fixes: the band validation now runs before any allocation, and the dead
 * sprintf into a local buffer (whose only consumer, an iftWarning call, was
 * commented out) has been removed. */
iftImage * iftMImageToImage(const iftMImage *img1, int Imax, int band)
{
    if ((band < 0) || (band >= img1->m))
        iftError("Invalid band", "iftMImageToImage");
    iftImage *img2 = iftCreateImage(img1->xsize, img1->ysize, img1->zsize);
    int b = band;
    double min = IFT_INFINITY_FLT, max = IFT_INFINITY_FLT_NEG;
    for (int p = 0; p < img1->n; p++) {
        if (img1->val[p][b] < min)
            min = img1->val[p][b];
        if (img1->val[p][b] > max)
            max = img1->val[p][b];
    }
    if (max > min) {
        for (int p = 0; p < img2->n; p++) {
            img2->val[p] = (int)(Imax * (img1->val[p][b] - min) / (max - min));
        }
    }
    /* else: degenerate (constant) band — output values stay as created */
    img2->dx = img1->dx;
    img2->dy = img1->dy;
    img2->dz = img1->dz;
    return (img2);
}
/* Converts a linear voxel index p into its (x, y, z) coordinates. */
inline iftVoxel iftMGetVoxelCoord(const iftMImage *img, int p)
{
    iftVoxel u;
    int slice_size = img->xsize * img->ysize;
    int in_slice = p % slice_size;      /* offset within the XY slice */
    u.z = p / slice_size;
    u.y = in_slice / img->xsize;
    u.x = in_slice % img->xsize;
    return u;
}
/* Returns 1 when voxel v lies inside the image domain, 0 otherwise. */
inline char iftMValidVoxel(const iftMImage *img, iftVoxel v)
{
    int inside = (v.x >= 0) && (v.x < img->xsize) &&
                 (v.y >= 0) && (v.y < img->ysize) &&
                 (v.z >= 0) && (v.z < img->zsize);
    return inside ? 1 : 0;
}
/* Returns the maximum value of the given band; a negative band index
 * scans every band. */
float iftMMaximumValue(const iftMImage *img, int band) {
    float best = IFT_INFINITY_FLT_NEG;
    if (band < 0) {
        for (int b = 0; b < img->m; b++)
            for (int p = 0; p < img->n; p++)
                best = iftMax(img->val[p][b], best);
    } else {
        for (int p = 0; p < img->n; p++)
            best = iftMax(img->val[p][band], best);
    }
    return best;
}
// ---------- iftMImage.c end
// ---------- iftKernel.c start
/* Creates a kernel over a copy of adjacency A, with zero-initialized weights. */
iftKernel *iftCreateKernel(iftAdjRel *A)
{
    iftKernel *kern = (iftKernel *) iftAlloc(1, sizeof(iftKernel));
    kern->A = iftCopyAdjacency(A);
    kern->weight = iftAllocFloatArray(kern->A->n);
    return kern;
}
/* Releases a kernel (adjacency + weights) and NULLs the caller's pointer. */
void iftDestroyKernel(iftKernel **K)
{
    iftKernel *kern = *K;
    if (kern == NULL)
        return;
    iftDestroyAdjRel(&kern->A);
    iftFree(kern->weight);
    iftFree(kern);
    *K = NULL;
}
// ---------- iftKernel.c end
// ---------- iftMemory.c start
#ifdef __linux__
#include <sys/sysinfo.h>
#include <malloc.h>
#endif
#ifdef __APPLE__
#include <mach/task.h>
#include <mach/mach_init.h>
#endif
#ifdef _WINDOWS
#include <windows.h>
#else
#include <sys/resource.h>
#endif
/* Allocates a zero-initialized array of n ints; aborts on failure. */
int *iftAllocIntArray(long n)
{
    int *arr = (int *) iftAlloc(n, sizeof(int));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocIntArray");
    return arr;
}
/* Copies nelems ints from array_src into array_dst (non-overlapping). */
void iftCopyIntArray(int *array_dst, const int *array_src, int nelems)
{
#if IFT_OMP
#pragma omp parallel for
#endif
    for (int idx = 0; idx < nelems; idx++)
        array_dst[idx] = array_src[idx];
}
#ifndef __cplusplus
/* Allocates a zero-initialized array of n long longs; aborts on failure.
 * (C-only: excluded from C++ translation units.) */
long long *iftAllocLongLongIntArray(long n)
{
    long long *arr = (long long *) iftAlloc(n, sizeof(long long));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocLongLongIntArray");
    return arr;
}
/* Copies nelems long longs from array_src into array_dst. */
void iftCopyLongLongIntArray(long long *array_dst, const long long *array_src, int nelems)
{
#if IFT_OMP
#pragma omp parallel for
#endif
    for (int idx = 0; idx < nelems; idx++)
        array_dst[idx] = array_src[idx];
}
#endif
/* Allocates a zero-initialized array of n floats; aborts on failure. */
float *iftAllocFloatArray(long n)
{
    float *arr = (float *) iftAlloc(n, sizeof(float));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocFloatArray");
    return arr;
}
/* Copies nelems floats from array_src to array_dst.
 * memmove keeps the copy safe even for overlapping buffers. */
void iftCopyFloatArray(float *array_dst, float *array_src, int nelems)
{
    memmove(array_dst, array_src, (size_t)nelems * sizeof(float));
}
/* Allocates a zero-initialized array of n doubles; aborts on failure. */
double *iftAllocDoubleArray(long n)
{
    double *arr = (double *) iftAlloc(n, sizeof(double));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocDoubleArray");
    return arr;
}
/* Copies nelems doubles from array_src into array_dst (non-overlapping). */
void iftCopyDoubleArray(double *array_dst, double *array_src, int nelems)
{
#if IFT_OMP
#pragma omp parallel for
#endif
    for (int idx = 0; idx < nelems; idx++)
        array_dst[idx] = array_src[idx];
}
/* Allocates a zero-initialized array of n ushorts; aborts on failure. */
ushort *iftAllocUShortArray(long n)
{
    ushort *arr = (ushort *) iftAlloc(n, sizeof(ushort));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocUShortArray");
    return arr;
}
/* Allocates a zero-initialized array of n uchars; aborts on failure. */
uchar *iftAllocUCharArray(long n)
{
    uchar *arr = (uchar *) iftAlloc(n, sizeof(uchar));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocUCharArray");
    return arr;
}
/* Allocates a zero-initialized array of n chars; aborts on failure. */
char *iftAllocCharArray(long n)
{
    char *arr = (char *) iftAlloc(n, sizeof(char));
    if (arr == NULL)
        iftError("Cannot allocate memory space", "iftAllocCharArray");
    return arr;
}
/* Allocates a zeroed buffer for a string of up to n characters
 * (one extra byte for the terminating '\0'). */
char *iftAllocString(long n)
{
    return iftAllocCharArray(n + 1);
}
// ---------- iftMemory.c end
// ---------- iftSet.c start
/* Pushes elem onto the front of set list S (O(1); duplicates are allowed).
 *
 * Fix: the original's if/else branches performed identical assignments —
 * `p->next = NULL` when *S is NULL is exactly `p->next = *S` — so the
 * duplicated branch is collapsed. */
void iftInsertSet(iftSet **S, int elem)
{
    iftSet *p = (iftSet *) iftAlloc(1, sizeof(iftSet));
    if (p == NULL)
        iftError(MSG_MEMORY_ALLOC_ERROR, "iftInsertSet");
    p->elem = elem;
    p->next = *S;   /* works for both empty and non-empty lists */
    *S = p;
}
/* Pops and returns the head element of S; returns IFT_NIL when S is empty. */
int iftRemoveSet(iftSet **S)
{
    int elem = IFT_NIL;
    if (*S != NULL) {
        iftSet *head = *S;
        elem = head->elem;
        *S = head->next;
        iftFree(head);
    }
    return elem;
}
/* Removes the first node holding elem from S, if any (no-op otherwise). */
void iftRemoveSetElem(iftSet **S, int elem)
{
    if (S == NULL || *S == NULL)
        return;
    iftSet *cur = *S;
    if (cur->elem == elem) {
        /* head match */
        *S = cur->next;
        iftFree(cur);
        return;
    }
    /* advance until cur->next is the node to delete (or end of list) */
    while (cur->next != NULL && cur->next->elem != elem)
        cur = cur->next;
    if (cur->next != NULL) {
        iftSet *victim = cur->next;
        cur->next = victim->next;
        iftFree(victim);
    }
}
/* Frees every node of the set list and NULLs the caller's pointer. */
void iftDestroySet(iftSet **S)
{
    iftSet *node = *S;
    while (node != NULL) {
        iftSet *next = node->next;
        iftFree(node);
        node = next;
    }
    *S = NULL;
}
/* Returns a new set containing every element of S1 followed by the elements
 * of S2 that are not already present (membership check only for S2). */
iftSet* iftSetUnion(iftSet *S1, iftSet *S2)
{
    iftSet *result = NULL;
    for (iftSet *cur = S1; cur != NULL; cur = cur->next)
        iftInsertSet(&result, cur->elem);
    for (iftSet *cur = S2; cur != NULL; cur = cur->next)
        iftUnionSetElem(&result, cur->elem);
    return result;
}
/* Returns a new set with every element of S1 then every element of S2
 * (duplicates are kept). */
iftSet* iftSetConcat(iftSet *S1, iftSet *S2)
{
    iftSet *result = NULL;
    for (iftSet *cur = S1; cur != NULL; cur = cur->next)
        iftInsertSet(&result, cur->elem);
    for (iftSet *cur = S2; cur != NULL; cur = cur->next)
        iftInsertSet(&result, cur->elem);
    return result;
}
/* Inserts elem into S only if absent; returns 1 when inserted, 0 otherwise. */
char iftUnionSetElem(iftSet **S, int elem)
{
    for (iftSet *cur = *S; cur != NULL; cur = cur->next) {
        if (cur->elem == elem)
            return 0;   /* already present */
    }
    iftInsertSet(S, elem);
    return 1;
}
/* Reverses the order of set S in place (pops from S, pushes onto a new list). */
void iftInvertSet(iftSet **S)
{
    iftSet *reversed = NULL;
    while (*S != NULL)
        iftInsertSet(&reversed, iftRemoveSet(S));
    *S = reversed;
}
/* Returns the number of nodes in the set list S. */
int iftSetSize(const iftSet* S)
{
    int count = 0;
    for (const iftSet *cur = S; cur != NULL; cur = cur->next)
        count++;
    return count;
}
/* Returns a copy of S (implemented as a union with the empty set). */
iftSet* iftSetCopy(iftSet* S)
{
    return iftSetUnion(S, 0);
}
/* Returns 1 when elem occurs in S, 0 otherwise. */
int iftSetHasElement(iftSet *S, int elem)
{
    for (iftSet *cur = S; cur != NULL; cur = cur->next) {
        if (cur->elem == elem)
            return 1;
    }
    return 0;
}
/* Flattens set S into a newly allocated int array, preserving list order. */
iftIntArray *iftSetToArray(iftSet *S) {
    iftIntArray *array = iftCreateIntArray(iftSetSize(S));
    int idx = 0;
    for (iftSet *cur = S; cur != NULL; cur = cur->next)
        array->val[idx++] = cur->elem;
    return array;
}
// ---------- iftSet.c end
// ---------- iftSList.c start
/* Allocates an empty doubly-linked string list. */
iftSList *iftCreateSList()
{
    iftSList *SL = (iftSList *) iftAlloc(1, sizeof(iftSList));
    /* explicit init, even though iftAlloc zeroes */
    SL->n = 0;
    SL->head = SL->tail = NULL;
    return SL;
}
/* Frees every node (and its owned string) of the list, then the list itself,
 * and NULLs the caller's pointer. */
void iftDestroySList(iftSList **SL)
{
    if (SL == NULL)
        return;
    iftSList *list = *SL;
    if (list == NULL)
        return;
    iftSNode *node = list->head;
    while (node != NULL) {
        iftSNode *next = node->next;
        if (node->elem != NULL)
            iftFree(node->elem);
        iftFree(node);
        node = next;
    }
    iftFree(list);
    *SL = NULL;
}
/* Inserts a copy of elem at the head of the string list SL.
 *
 * Fixes: the original copied elem into a fixed 512-byte buffer, overflowing
 * for longer strings; the node string is now sized to the element. A NULL-elem
 * check is added for consistency with iftInsertSListIntoTail. */
void iftInsertSListIntoHead(iftSList *SL, const char *elem)
{
    if (SL == NULL)
        iftError("The String Linked List SL is NULL. Allocated it first", "iftInsertSListIntoHead");
    if (elem == NULL)
        iftError("The Element to be Inserted is NULL", "iftInsertSListIntoHead");
    iftSNode *snode = (iftSNode*) iftAlloc(1, sizeof(iftSNode));
    snode->prev = NULL;
    snode->next = NULL;
    snode->elem = iftAllocCharArray(strlen(elem) + 1);   /* exact fit, no overflow */
    strcpy(snode->elem, elem);
    if (SL->head == NULL) {
        /* list was empty */
        SL->head = snode;
        SL->tail = snode;
        SL->n = 1;
    }
    else {
        snode->next = SL->head;
        SL->head->prev = snode;
        SL->head = snode;
        SL->n++;
    }
}
/* Appends a copy of elem to the tail of the string list SL. */
void iftInsertSListIntoTail(iftSList *SL, const char *elem)
{
    if (SL == NULL)
        iftError("The String Linked List SL is NULL. Allocated it first", "iftInsertSListIntoTail");
    if (elem == NULL)
        iftError("The Element to be Inserted is NULL", "iftInsertSListIntoTail");
    iftSNode *snode = (iftSNode*) iftAlloc(1, sizeof(iftSNode));
    snode->prev = NULL;
    snode->next = NULL;
    snode->elem = iftAllocCharArray(strlen(elem) + 1);
    strcpy(snode->elem, elem);
    if (SL->head == NULL) {
        /* first node: head and tail coincide */
        SL->head = SL->tail = snode;
        SL->n = 1;
    } else {
        snode->prev = SL->tail;
        SL->tail->next = snode;
        SL->tail = snode;
        SL->n++;
    }
}
/* Detaches the head node and returns its string (ownership passes to the
 * caller, who must free it). Returns NULL when the list is empty. */
char *iftRemoveSListHead(iftSList *SL)
{
    if (SL == NULL)
        iftError("The String Linked List SL is NULL. Allocated it first", "iftRemoveSListHead");
    if (SL->head == NULL)
        return NULL;
    iftSNode *node = SL->head;
    SL->head = node->next;
    if (SL->head == NULL)
        SL->tail = NULL;        /* list became empty */
    else
        SL->head->prev = NULL;
    SL->n--;
    char *elem = node->elem;
    node->elem = NULL;
    iftFree(node);              /* node only; string is returned */
    return elem;
}
/* Detaches the tail node and returns its string (ownership passes to the
 * caller, who must free it). Returns NULL when the list is empty. */
char *iftRemoveSListTail(iftSList *SL)
{
    if (SL == NULL)
        iftError("The String Linked List SL is NULL. Allocated it first", "iftRemoveSListTail");
    if (SL->head == NULL)
        return NULL;
    iftSNode *node = SL->tail;
    SL->tail = node->prev;
    if (SL->tail == NULL)
        SL->head = NULL;        /* list became empty */
    else
        SL->tail->next = NULL;
    SL->n--;
    char *elem = node->elem;
    node->elem = NULL;
    iftFree(node);              /* node only; string is returned */
    return elem;
}
// ---------- iftSList.c end
// ---------- iftDialog.c start
/* Formats msg (printf-style, varargs after func), prints it to stderr tagged
 * with the reporting function's name, and terminates the process.
 *
 * Fix: vsprintf into the fixed 4096-byte buffer could overflow for long
 * messages (e.g. long paths); vsnprintf truncates safely instead. */
void iftError(const char *msg, const char *func, ...)
{
    va_list args;
    char final_msg[4096];
    va_start(args, func);
    vsnprintf(final_msg, sizeof(final_msg), msg, args);
    va_end(args);
    fprintf(stderr, "\nError in %s: \n%s\n", func, final_msg);
    fflush(stdout);
    exit(-1);
}
/* Formats msg (printf-style, varargs after func) and prints a warning to
 * stdout tagged with the reporting function's name. Does not terminate.
 *
 * Fix: vsprintf into the fixed buffer could overflow; vsnprintf truncates. */
void iftWarning(const char *msg, const char *func, ...)
{
    va_list args;
    char final_msg[4096];
    va_start(args, func);
    vsnprintf(final_msg, sizeof(final_msg), msg, args);
    va_end(args);
    fprintf(stdout, "\nWarning in %s: \n%s\n", func, final_msg);
}
// ---------- iftDialog.c end
// ---------- iftStream.c start
/* Reads one line from stream via getline, strips the trailing newline, and
 * returns a heap string the caller must free. Returns NULL at end of file or
 * for a NULL stream.
 *
 * Fix: `line[str_len-1]` indexed out of bounds when the returned string was
 * empty (str_len == 0); the newline strip is now guarded. */
char *iftGetLine(FILE *stream) {
    if (stream == NULL || feof(stream))
        return NULL;
    size_t buffer_size = 0;
    char *line = NULL;
    if (getline(&line, &buffer_size, stream) == -1) {
        if (feof(stream)) {
            if (line != NULL)
                free(line);   /* getline may have allocated even on EOF */
            return NULL;
        } else
            iftError("Error with getline command", "iftGetLine");
    }
    size_t str_len = strlen(line);
    if (str_len > 0 && line[str_len-1] == '\n')
        line[str_len-1] = '\0'; // drop the newline so callers get the bare line
    return line;
}
// ---------- iftStream.c end
// ---------- iftString.c start
/* Removes every trailing occurrence of character c from s, in place.
 *
 * Fix: when s consisted entirely of c (or was empty after trimming), the
 * original read s[-1] — undefined behavior; the scan is now bounded at 0. */
void iftRightTrim(char* s, char c)
{
    int idx = (int) strlen(s) - 1;
    while (idx >= 0 && s[idx] == c) {
        idx--;
    }
    s[idx+1] = '\0';
}
/* Splits phrase on every occurrence of delimiter and returns the pieces as a
 * string list (in order). A phrase with no delimiter yields a single-element
 * list; adjacent delimiters yield empty-string elements. Caller owns the list. */
iftSList *iftSplitString(const char *phrase, const char *delimiter)
{
    if (phrase == NULL)
        iftError("String to be splitted is NULL", "iftSplitString");
    if (delimiter == NULL)
        iftError("Delimiter is NULL", "iftSplitString");
    /* scratch buffer large enough for any sub-string of phrase */
    char *buf = iftAllocString(strlen(phrase)+1);
    const char *pr = phrase;
    const char *loc = strstr(pr, delimiter); // pointer to first delimiter occurrence in the string
    size_t length = strlen(delimiter);
    size_t bytes;
    iftSList *SL = iftCreateSList();
    // build a list of sub-strings
    while (loc != NULL) {
        bytes = loc - pr;
        strncpy(buf, pr, bytes);
        buf[bytes] = '\0'; // \0 character must be added manually because strncpy does not do that, as opposed to other functions such as strcpy and sprintf
        iftInsertSListIntoTail(SL, buf);
        pr = loc + length;
        loc = strstr(pr, delimiter);
    }
    // Copies the last substring to the left of the last delimiter found OR
    // Copies the whole string if it doesn't have the delimiter
    strcpy(buf, pr);
    iftInsertSListIntoTail(SL, buf);
    iftFree(buf);
    return SL;
}
/* Returns a newly allocated lower-case copy of str.
 *
 * Fixes: the loop condition called strlen(str) on every iteration (O(n^2));
 * the length is now hoisted. tolower's argument is cast to unsigned char to
 * avoid undefined behavior for negative char values. */
char *iftLowerString(const char *str)
{
    if (str == NULL)
        iftError("Input string is NULL", "iftLowerString");
    size_t len = strlen(str);
    char *out_str = iftAllocCharArray(len + 1);
    for (size_t c = 0; c < len; c++)
        out_str[c] = tolower((unsigned char) str[c]);
    return out_str;
}
/* Returns true when str1 and str2 hold identical text; aborts on NULL input. */
bool iftCompareStrings(const char *str1, const char *str2)
{
    if (str1 == NULL)
        iftError("First String is NULL", "iftCompareStrings");
    if (str2 == NULL)
        iftError("Second String is NULL", "iftCompareStrings");
    return strcmp(str1, str2) == 0;
}
/* Splits phrase on delimiter and returns a copy of the piece at the given
 * position. Non-negative positions count from the start (0-based); negative
 * positions count from the end (-1 is the last piece). Aborts when position
 * is out of range. Caller owns the returned string. */
char *iftSplitStringAt(const char *phrase, const char *delimiter, long position)
{
    if (phrase == NULL)
        iftError("String to be splitted is NULL", "iftSplitStringAt");
    if (delimiter == NULL)
        iftError("Delimiter is NULL", "iftSplitStringAt");
    iftSList *SL = iftSplitString(phrase, delimiter);
    iftSNode *snode = NULL;
    // Copies the split sub-string of the position
    if (position >= 0) {
        if ((position+1) <= SL->n) {
            /* walk forward from the head */
            snode = SL->head;
            for (size_t i = 0; i < (ulong)position; i++)
                snode = snode->next;
        }
        else {
            iftError("Invalid Position %ld\n-> Position Index must be < %ld\n",
                     "iftSplitStringAt", position, SL->n);
        }
    } else {
        if (labs(position) <= SL->n) {
            /* negative position: walk backward from the tail */
            long real_pos = SL->n + position;
            snode = SL->tail;
            for (size_t i = SL->n-1; i > (ulong)real_pos; i--) {
                snode = snode->prev;
            }
        }
        else {
            iftError("Invalid Negative Position %ld\n-> Negative Position Index must be >= %ld\n",
                     "iftSplitStringAt", position, -1 * SL->n);
        }
    }
    char *str = iftCopyString(snode->elem);
    iftDestroySList(&SL);
    return str;
}
/* printf-style string builder: formats the arguments and returns a newly
 * allocated copy of the result. Caller owns the returned string.
 *
 * Fix: vsprintf into the fixed-size stack buffer could overflow for long
 * formatted output; vsnprintf truncates safely at the buffer size. */
char *iftCopyString(const char *format, ...)
{
    va_list args;
    char str[IFT_STR_DEFAULT_SIZE];
    va_start(args, format);
    vsnprintf(str, sizeof(str), format, args);
    va_end(args);
    char *copy = iftAllocCharArray(strlen(str) + 1);
    strcpy(copy, str);
    return copy;
}
/* Returns a copy of str with suffix removed from its end when present;
 * otherwise returns a plain copy. An empty suffix never matches. */
char *iftRemoveSuffix(const char *str, const char *suffix)
{
    if (str == NULL)
        iftError("String is NULL", "iftRemoveSuffix");
    if (suffix == NULL)
        iftError("Suffix is NULL", "iftRemoveSuffix");
    if (iftCompareStrings(suffix, "") || !iftEndsWith(str, suffix))
        return iftCopyString(str);
    /* truncate the copy at the point where the suffix begins */
    char *out_str = iftCopyString(str);
    out_str[strlen(str) - strlen(suffix)] = '\0';
    return out_str;
}
/* Returns true when str ends with suffix (every string ends with ""). */
bool iftEndsWith(const char *str, const char *suffix)
{
    if (str == NULL)
        iftError("String is NULL", "iftEndsWith");
    if (suffix == NULL)
        iftError("Suffix is NULL", "iftEndsWith");
    size_t len_suffix = strlen(suffix);
    size_t len_str = strlen(str);
    if (len_suffix > len_str)
        return false;
    /* compare the tail of str against suffix */
    return strncmp(str + (len_str - len_suffix), suffix, len_suffix) == 0;
}
/* Returns true when str starts with prefix (every string starts with ""). */
bool iftStartsWith(const char *str, const char *prefix)
{
    if (str == NULL)
        iftError("String is NULL", "iftStartsWith");
    if (prefix == NULL)
        iftError("Prefix is NULL", "iftStartsWith");
    size_t len_prefix = strlen(prefix);
    if (len_prefix > strlen(str))
        return false;
    return strncmp(str, prefix, len_prefix) == 0;
}
/* Concatenates n strings (passed as varargs) into one newly allocated string.
 * Caller owns the result. Relies on iftAllocCharArray returning zeroed memory
 * so that the first strcat appends to an empty string. */
char *iftConcatStrings(int n, ...)
{
    if (n <= 0)
        iftError("Number of Strings to be concatenated is <= 0", "iftConcatStrings");
    size_t out_str_size = 1; // '\0'
    // Counts the size of the concatenated string
    va_list strings;
    va_start(strings, n);
    for (int i = 0; i < n; i++)
        out_str_size += strlen(va_arg(strings, char*));
    va_end(strings);
    char *concat_str = iftAllocCharArray(out_str_size);
    /* second pass: append each argument in order */
    va_start(strings, n);
    for (int i = 0; i < n; i++)
        strcat(concat_str, va_arg(strings, char*));
    va_end(strings);
    return concat_str;
}
/* Returns a copy of str with prefix removed from its start when present;
 * otherwise returns a plain copy. An empty prefix never matches. */
char *iftRemovePrefix(const char *str, const char *prefix)
{
    if (str == NULL)
        iftError("String is NULL", "iftRemovePrefix");
    if (prefix == NULL)
        iftError("Prefix is NULL", "iftRemovePrefix");
    if (!iftCompareStrings(prefix, "") && iftStartsWith(str, prefix))
        return iftCopyString(str + strlen(prefix));   /* skip the prefix */
    return iftCopyString(str);
}
/* Returns a new string equal to str with every occurrence of old_sub replaced
 * by new_sub. Caller owns the result.
 *
 * Fixes: (1) every string returned by iftRemoveSListHead was leaked — they are
 * now freed after use; (2) the buffer size added the new-substring length
 * twice, over-allocating for no reason (strlen(str) + n_sub*strlen(new_sub)+1
 * is already an upper bound on the result). */
char *iftReplaceString(const char *str, const char *old_sub, const char *new_sub) {
    if (str == NULL)
        iftError("String is NULL", "iftReplaceString");
    if (old_sub == NULL)
        iftError("Old Substring is NULL", "iftReplaceString");
    if (new_sub == NULL)
        iftError("New Substring is NULL", "iftReplaceString");
    iftSList *SL = iftSplitString(str, old_sub);
    long n_sub = SL->n-1; // number of old substrings found in str
    /* upper bound on the replaced string size (with '\0') */
    size_t str_len = strlen(str) + (n_sub * strlen(new_sub)) + 1;
    char *rep_str = iftAllocCharArray(str_len);
    // builds the replaced string - the list always has at least one element
    char *elem = iftRemoveSListHead(SL);
    while ((elem != NULL) && (SL->n >= 1)) {
        strcat(rep_str, elem);
        strcat(rep_str, new_sub);
        iftFree(elem);                   /* piece was copied; release it */
        elem = iftRemoveSListHead(SL);
    }
    strcat(rep_str, elem); // copies the last element from the List
    iftFree(elem);
    iftDestroySList(&SL);
    return rep_str;
}
// ---------- iftString.c end
// ---------- iftDir.c start
int _iftCmpFiles(const void *a, const void *b)
{
iftFile **f1 = (iftFile**) a;
iftFile **f2 = (iftFile**) b;
return strcmp((*f1)->path, (*f2)->path);
}
int _iftCmpDirs(const void *a, const void *b)
{
iftDir **dir1 = (iftDir**) a;
iftDir **dir2 = (iftDir**) b;
return strcmp((*dir1)->path, (*dir2)->path);
}
/* Counts the regular entries (*nfiles) and subdirectories (*nsubdirs) directly
 * inside dir_pathname, skipping "." and "..". Aborts if the directory cannot
 * be opened.
 *
 * Fix: the error path used sprintf into a fixed 512-byte buffer, which could
 * overflow for long paths; iftError takes printf-style varargs directly. */
void _iftCountFilesInDirectory(const char *dir_pathname, long *nfiles, long *nsubdirs)
{
    //http://pubs.opengroup.org/onlinepubs/007908799/xsh/dirent.h.html
    //http://www.delorie.com/gnu/docs/glibc/libc_270.html
    DIR *system_dir;
    struct dirent *entry;
    char *pathname = NULL;
    *nfiles = 0;
    *nsubdirs = 0;
    system_dir = opendir(dir_pathname);
    if (system_dir == NULL)
        iftError("Error opening directory path: \"%s\"", "_iftCountFilesInDirectory", dir_pathname);
    while ((entry = readdir(system_dir)) != NULL) {
        // it excludes the system_dir . and ..
        if ((strcmp(entry->d_name, ".") != 0) && (strcmp(entry->d_name, "..") != 0)) {
            pathname = iftJoinPathnames(2, dir_pathname, entry->d_name);
            if (iftDirExists(pathname))
                (*nsubdirs)++;
            else
                (*nfiles)++;
            iftFree(pathname);
            pathname = NULL;
        }
    }
    closedir(system_dir);
}
/* Recursively populates dir->files and dir->subdirs up to hier_levels deep
 * (curr_level is the current recursion depth, starting at 1). Entries are
 * sorted by path. Aborts if a directory cannot be opened.
 *
 * Fix: the error path used sprintf into a fixed 512-byte buffer, which could
 * overflow for long paths; iftError takes printf-style varargs directly. */
void _iftListDirectoryRec(iftDir *dir, long hier_levels, long curr_level)
{
    DIR *system_dir;
    struct dirent *entry;
    char *pathname = NULL;
    dir->files = NULL;
    dir->subdirs = NULL;
    if (curr_level <= hier_levels) {
        system_dir = opendir(dir->path);
        if (system_dir) {
            /* first pass counts entries so the arrays can be sized exactly */
            _iftCountFilesInDirectory(dir->path, &dir->nfiles, &dir->nsubdirs);
            if (dir->nfiles != 0)
                dir->files = (iftFile**) iftAlloc(dir->nfiles, sizeof(iftFile*));
            if (dir->nsubdirs != 0)
                dir->subdirs = (iftDir**) iftAlloc(dir->nsubdirs, sizeof(iftDir*));
            long i = 0, j = 0;
            while ((entry = readdir(system_dir)) != NULL) {
                // it excludes the dir . and ..
                if ((strcmp(entry->d_name, ".") != 0) && (strcmp(entry->d_name, "..") != 0)) {
                    pathname = iftJoinPathnames(2, dir->path, entry->d_name);
                    if (iftDirExists(pathname)) { // it is a directory
                        iftDir *subdir = (iftDir*) iftAlloc(1, sizeof(iftDir));
                        subdir->path = pathname;   /* takes ownership of pathname */
                        subdir->nfiles = 0;
                        subdir->nsubdirs = 0;
                        subdir->files = NULL;
                        subdir->subdirs = NULL;
                        _iftListDirectoryRec(subdir, hier_levels, curr_level+1);
                        dir->subdirs[j++] = subdir;
                    }
                    else { // it is a File
                        iftFile *f = (iftFile*) iftAlloc(1, sizeof(iftFile));
                        f->path = pathname;        /* takes ownership of pathname */
                        dir->files[i++] = f;
                        f->suffix = NULL;
                    }
                }
            }
            closedir(system_dir);
            /* sorts the pathnames using qsort functions */
            qsort(dir->files, dir->nfiles, sizeof(iftFile*), _iftCmpFiles);
            qsort(dir->subdirs, dir->nsubdirs, sizeof(iftDir*), _iftCmpDirs);
        }
        else {
            iftError("Error opening directory path: \"%s\"", "_iftListDirectoryRec", dir->path);
        }
    }
}
/* Entry point for directory listing: resets the root counters and recurses.
 * hier_levels == 0 means "no limit" (mapped to the maximum int). */
void _iftListDirectory(iftDir *root_dir, long hier_levels)
{
    if (root_dir == NULL)
        iftError("Directory is NULL", "_iftListDirectory");
    if (hier_levels == 0)
        hier_levels = IFT_INFINITY_INT; // trick to set the hier_levels as the possible maximum
    root_dir->nfiles = 0;
    root_dir->nsubdirs = 0;
    root_dir->files = NULL;
    root_dir->subdirs = NULL;
    _iftListDirectoryRec(root_dir, hier_levels, 1);
}
/* printf-style path test: formats the arguments into a pathname and returns
 * true when that path exists and is a directory.
 *
 * Fix: vsprintf into the fixed-size buffer could overflow for long formatted
 * paths; vsnprintf truncates safely at the buffer size. */
bool iftDirExists(const char *format, ...)
{
    va_list args;
    char pathname[IFT_STR_DEFAULT_SIZE];
    va_start(args, format);
    vsnprintf(pathname, sizeof(pathname), format, args);
    va_end(args);
    struct stat st;
    if (stat(pathname, &st) == 0) {
        if (S_ISDIR(st.st_mode))
            return true; //it's a directory
    }
    return false;
}
/**
 * Returns a newly-allocated string with the parent directory of <pathname>
 * (i.e. the pathname with its last component and trailing separator removed).
 * Returns "." when the pathname has no parent component.
 */
char *iftParentDir(const char *pathname)
{
    char *filename   = iftSplitStringAt(pathname, IFT_SEP_C, -1);
    char *parent_dir = iftRemoveSuffix(pathname, filename);
    iftFree(filename);

    if (strcmp(parent_dir, "") == 0) {
        // FIX: the empty-string buffer returned by iftRemoveSuffix may hold
        // only the terminator, so strcpy'ing "." into it could overflow;
        // allocate a fresh 2-byte buffer instead.
        iftFree(parent_dir);
        parent_dir = iftAllocCharArray(2);
        strcpy(parent_dir, ".");
    }
    else {
        // drop the trailing directory-separator char
        parent_dir[strlen(parent_dir)-1] = '\0';
    }
    return (parent_dir);
}
/**
 * Creates the directory <dir_path>, including any missing intermediate
 * directories (like `mkdir -p`). Does nothing if it already exists.
 */
void iftMakeDir(const char *dir_path)
{
    if (!iftDirExists(dir_path)) {
        // FIX: size the work buffer by the actual path length (+2 for a
        // trailing separator and '\0') instead of a fixed IFT_STR_DEFAULT_SIZE,
        // which could overflow on very long paths.
        char *parent_dir = iftAllocCharArray(strlen(dir_path) + 2);
        strcpy(parent_dir, "");

        // rebuild the path component by component, creating each level
        iftSList *SL = iftSplitString(dir_path, IFT_SEP_C);
        char *inter_dir = iftRemoveSListHead(SL);
        while (inter_dir != NULL) {
            strcat(parent_dir, inter_dir);
            strcat(parent_dir, IFT_SEP_C);

            if (!iftDirExists(parent_dir)) {
#if defined(__linux) || defined(__APPLE__)
                if (mkdir(parent_dir, 0777) == -1) // Create the directory
                    iftError("Problem to create the directory: %s", "iftMakeDir", dir_path);
#else
                if (!CreateDirectory(parent_dir, NULL))
                    iftError("Problem to create the directory", "iftMakeDir");
#endif
            }

            iftFree(inter_dir);
            inter_dir = iftRemoveSListHead(SL);
        }
        iftDestroySList(&SL);
        iftFree(parent_dir);
    }
}
/**
 * Loads the directory (one level deep) and keeps only the files whose
 * basename matches <regex>; non-matching files are destroyed.
 */
iftDir *iftLoadFilesFromDirByRegex(const char *dir_pathname, const char *regex)
{
    if (dir_pathname == NULL)
        iftError("Dir's Pathname is NULL", "iftLoadFilesFromDirByRegex");
    if (regex == NULL)
        iftError("Regex is NULL", "iftLoadFilesFromDirByRegex");

    iftDir *dir = iftLoadDir(dir_pathname, 1);

    // first pass: count the files whose basename matches the regex
    long total     = dir->nfiles;
    long n_matches = 0;
    for (long k = 0; k < total; k++) {
        char *base = iftFilename(dir->files[k]->path, NULL);
        if (iftRegexMatch(base, regex))
            n_matches++;
        iftFree(base);
    }

    // second pass: move matching iftFile pointers into a fresh array and
    // destroy the rest
    iftFile **all_files = dir->files;
    dir->files  = (iftFile**) iftAlloc(n_matches, sizeof(iftFile*));
    dir->nfiles = n_matches;

    long next = 0;
    for (long k = 0; k < total; k++) {
        char *base = iftFilename(all_files[k]->path, NULL);
        if (iftRegexMatch(base, regex)) {
            dir->files[next++] = all_files[k];
            all_files[k] = NULL; // ownership transferred to dir->files
        }
        else iftDestroyFile(&all_files[k]);
        iftFree(base);
    }
    iftFree(all_files);

    return dir;
}
/**
 * Loads the directory <dir_pathname>, listing its content down to
 * <hier_levels> levels (0 = unlimited). Errors out if the pathname does not
 * exist or is a regular file.
 */
iftDir *iftLoadDir(const char *dir_pathname, long hier_levels)
{
    iftDir *dir = NULL;

    if (iftPathnameExists(dir_pathname)) {
        // it is really a directory and it exists
        if (iftDirExists(dir_pathname)) {
            dir = (iftDir*) iftAlloc(1, sizeof(iftDir));
            // one more char to put the separation '/'
            dir->path = iftAllocCharArray(strlen(dir_pathname) + 2);
            strcpy(dir->path, dir_pathname);

            // puts the '/' at the end of the pathname
            if (dir->path[strlen(dir->path) - 1] != IFT_SEP_C[0])
                strcat(dir->path, IFT_SEP_C);

            _iftListDirectory(dir, hier_levels);
        }
        // it is a File instead of a Directory
        else {
            // FIX: pass the pathname straight to the variadic iftError (as the
            // rest of the file does) instead of sprintf'ing into a fixed
            // 512-byte buffer, which could overflow on long paths.
            iftError("Pathname \"%s\" is a File", "iftLoadDir", dir_pathname);
        }
    }
    else {
        iftError("Pathname \"%s\" does not exist!", "iftLoadDir", dir_pathname);
    }
    return dir;
}
/**
 * Destroys a directory handle: its path string, its files, its subdirs
 * (recursively), and the struct itself; sets *dir to NULL.
 * Safe to call with a NULL handle or NULL content.
 */
void iftDestroyDir(iftDir **dir)
{
    if (dir == NULL || *dir == NULL)
        return;

    iftDir *d = *dir;

    if (d->path != NULL)
        iftFree(d->path);

    // deallocates the files
    if (d->files != NULL) {
        for (long k = 0; k < d->nfiles; k++)
            iftDestroyFile(&d->files[k]);
        iftFree(d->files);
        d->files = NULL;
    }

    // deallocates the subdirs recursively
    if (d->subdirs != NULL) {
        for (long k = 0; k < d->nsubdirs; k++)
            iftDestroyDir(&d->subdirs[k]);
        iftFree(d->subdirs);
        d->subdirs = NULL;
    }

    iftFree(d);
    *dir = NULL;
}
// ---------- iftDir.c end
// ---------- iftRegex.c start
/**
 * Returns true iff <str> matches the POSIX extended regular expression built
 * from <regex_pattern> and the printf-style arguments.
 */
bool iftRegexMatch(const char *str, const char *regex_pattern, ...) {
    if (str == NULL)
        iftError("String is NULL", "iftRegexMatch");
    if (regex_pattern == NULL)
        iftError("Regular Expression is NULL", "iftRegexMatch");

    char error[IFT_STR_DEFAULT_SIZE];
    regex_t regex;
    int reti;

    va_list args;
    char final_regex_pattern[IFT_STR_DEFAULT_SIZE];
    va_start(args, regex_pattern);
    // FIX: vsnprintf (not vsprintf) so a long expansion cannot overflow the
    // fixed-size pattern buffer.
    vsnprintf(final_regex_pattern, sizeof(final_regex_pattern), regex_pattern, args);
    va_end(args);

    // Compile Regex
    if ((reti = regcomp(&regex, final_regex_pattern, REG_EXTENDED|REG_NOSUB)) != 0) {
        regerror(reti, &regex, error, sizeof(error));
        iftError("Regex Compilation Failed: \"%s\"\n" \
                 "IFT_ERROR: %s", "iftRegexMatch", final_regex_pattern, error);
    }

    // Execute Regex
    reti = regexec(&regex, str, (size_t) 0, NULL, 0);
    regfree(&regex);

    return (reti == 0);
}
// ---------- iftRegex.c end
// ---------- iftNumerical.c start
/**
 * Builds the integer sequence begin, begin+inc, ..., up to (and including)
 * the last value not past <end>.
 * @raises error if inc is 0 (would divide by zero when sizing the array).
 */
iftIntArray *iftIntRange(int begin, int end, int inc)
{
    // FIX: guard against a zero increment, which previously caused an
    // undefined division by zero when computing n
    if (inc == 0)
        iftError("Increment must be non-zero", "iftIntRange");

    int n = ((end - begin) / inc) + 1;

    iftIntArray *space = iftCreateIntArray(n);
    for (int i = 0; i < n; ++i) {
        space->val[i] = begin + (inc*i);
    }
    return space;
}
// ---------- iftNumerical.c end
// ---------- iftSort.c start
/**
 * Recursive quicksort of value[i0..i1] with index[] carried along, in
 * increasing or decreasing order according to <order>.
 * Uses a deterministic middle pivot to guarantee the same behavior on ties.
 */
void iftFQuickSort( float *value, int *index, int i0, int i1, uchar order )
{
    if (i0 >= i1)
        return;

    // deterministic middle pivot, moved to the front before partitioning
    int pivot = (i0 + i1) / 2;
    iftSwap( value[ pivot ], value[ i0 ] );
    iftSwap( index[ pivot ], index[ i0 ] );

    // Lomuto-style partition around value[i0]; m ends at the pivot's slot
    int m = i0;
    for (int d = i0 + 1; d <= i1; d++) {
        bool goes_before = (order == IFT_INCREASING) ? (value[ d ] < value[ i0 ])
                                                     : (value[ d ] > value[ i0 ]);
        if (goes_before) {
            m++;
            iftSwap( value[ d ], value[ m ] );
            iftSwap( index[ d ], index[ m ] );
        }
    }
    iftSwap( value[ m ], value[ i0 ] );
    iftSwap( index[ m ], index[ i0 ] );

    iftFQuickSort( value, index, i0, m - 1, order );
    iftFQuickSort( value, index, m + 1, i1, order );
}
// ---------- iftSort.c end
// ---------- iftFileSet.c start
int _iftCmpFilesSortFileSet(const void *a, const void *b) {
iftFile **f1 = (iftFile**) a;
iftFile **f2 = (iftFile**) b;
return strcmp((*f1)->path, (*f2)->path);
}
// Depth-first walk: appends this dir's file paths to the string list, then
// recurses into each subdirectory.
void _iftGetFilesFromDirRec(iftDir *dir, iftSList *SL) {
    for (long k = 0; k < dir->nfiles; k++)
        iftInsertSListIntoTail(SL, dir->files[k]->path);

    for (long k = 0; k < dir->nsubdirs; k++)
        _iftGetFilesFromDirRec(dir->subdirs[k], SL);
}
/**
 * Loads a file set from <file_entry>, which must be either a directory
 * (listed down to <hier_levels> levels) or a CSV file of pathnames.
 * Errors out for any other kind of entry.
 */
iftFileSet *iftLoadFileSetFromDirOrCSV(const char *file_entry, long hier_levels, bool sort_pathnames) {
    iftFileSet *fset = NULL;

    if (iftDirExists(file_entry)) {
        fset = iftLoadFileSetFromDir(file_entry, hier_levels); // it also returns a sorted list
    }
    else {
        char *lower_file_entry = iftLowerString(file_entry);
        bool is_csv = iftFileExists(file_entry) && iftEndsWith(lower_file_entry, ".csv");
        // FIX: free the lowered string on every path (it was leaked when the
        // entry was neither a directory nor a CSV)
        iftFree(lower_file_entry);

        if (is_csv)
            fset = iftLoadFileSetFromCSV(file_entry, sort_pathnames);
        else
            iftError("Invalid File Entry: %s\nIt is neither a directory nor a CSV file",
                     "iftLoadFileSetFromDirOrCSV", file_entry);
    }
    return fset;
}
/**
 * Reads every cell of the CSV as a pathname (row-major order) and builds a
 * file set from them, optionally sorting the pathnames at the end.
 */
iftFileSet *iftLoadFileSetFromCSV(const char *csv_pathname, bool sort_pathnames) {
    iftCSV *csv = iftReadCSV(csv_pathname, ',');

    long nfiles = csv->nrows * csv->ncols;
    iftFileSet *farr = iftCreateFileSet(nfiles);

    // flattened loop over all cells; p maps back to (row, col) in row-major order
#if IFT_OMP
    #pragma omp parallel for
#endif
    for (long p = 0; p < nfiles; p++) {
        long row = p / csv->ncols;
        long col = p % csv->ncols;
        char *expanded = iftExpandUser(csv->data[row][col]);
        farr->files[p] = iftCreateFile(expanded);
        iftFree(expanded);
    }
    iftDestroyCSV(&csv);

    if (sort_pathnames)
        iftSortFileSet(farr);

    return farr;
}
/**
 * Loads every file from <dir_pathname> (and its subdirs down to <hier_level>
 * levels) into a file set.
 */
iftFileSet *iftLoadFileSetFromDir(const char *dir_pathname, long hier_level) {
    if (dir_pathname == NULL)
        iftError("Directory \"%s\" is NULL", "iftLoadFileSetFromDir");
    if (!iftDirExists(dir_pathname))
        iftError("Directory \"%s\" does not exist", "iftLoadFileSetFromDir", dir_pathname);

    // collect every pathname from the directory tree into a string list
    iftDir *dir = iftLoadDir(dir_pathname, hier_level);
    iftSList *SL = iftCreateSList();
    _iftGetFilesFromDirRec(dir, SL);

    // convert the string list into a file set, consuming the nodes as we go
    iftFileSet *farr = iftCreateFileSet(SL->n);
    long idx = 0;
    iftSNode *node = SL->head;
    while (node != NULL) {
        farr->files[idx++] = iftCreateFile(node->elem);
        iftSNode *consumed = node;
        node = node->next;
        iftFree(consumed->elem);
        iftFree(consumed);
    }
    // the nodes are already gone; detach them before destroying the list shell
    SL->head = SL->tail = NULL;
    iftDestroySList(&SL);

    iftDestroyDir(&dir);

    return farr;
}
/**
 * Allocates a file set with <nfiles> slots, each initialized to NULL so
 * ownership is explicit until the caller fills them in.
 */
iftFileSet *iftCreateFileSet(long nfiles) {
    iftFileSet *farr = (iftFileSet *) iftAlloc(1, sizeof(iftFileSet));
    farr->n     = nfiles;
    farr->files = (iftFile**) iftAlloc(nfiles, sizeof(iftFile*));

    for (long k = 0; k < nfiles; k++)
        farr->files[k] = NULL;

    return farr;
}
/**
 * Builds a file set with the files of <dir_pathname> (one level) whose
 * basename matches <regex>, optionally sorted by pathname.
 */
iftFileSet *iftLoadFileSetFromDirByRegex(const char *dir_pathname, const char *regex, bool sort_pathnames) {
    // validate arguments before touching the filesystem
    if (dir_pathname == NULL)
        iftError("Dir's pathname is NULL", "iftLoadFilesFromDirByRegex");
    if (regex == NULL)
        iftError("Regex is NULL", "iftLoadFilesFromDirByRegex");
    if (!iftDirExists(dir_pathname))
        iftError("Directory \"%s\" does not exist", "iftReadFilesFromdDirectory", dir_pathname);

    // delegate the filtering, then copy the matching files into a file set
    iftDir *dir = iftLoadFilesFromDirByRegex(dir_pathname, regex);
    iftFileSet *fset = iftCreateFileSet(dir->nfiles);
    for (long k = 0; k < dir->nfiles; k++)
        fset->files[k] = iftCopyFile(dir->files[k]);

    if (sort_pathnames)
        iftSortFileSet(fset);

    iftDestroyDir(&dir);

    return fset;
}
/**
 * Destroys a file set and all files it owns; sets *farr to NULL.
 * Safe to call with a NULL handle or NULL content.
 */
void iftDestroyFileSet(iftFileSet **farr) {
    if (farr == NULL || *farr == NULL)
        return;

    iftFileSet *fset = *farr;
    if (fset->files != NULL) {
        for (long k = 0; k < fset->n; k++)
            iftDestroyFile(&(fset->files[k]));
        iftFree(fset->files);
    }
    iftFree(fset);
    *farr = NULL;
}
// Sorts the file set in place by pathname (lexicographic strcmp order).
// NOTE(review): assumes files is non-NULL — confirm callers never pass NULL.
void iftSortFileSet(iftFileSet *files) {
    qsort(files->files, files->n, sizeof(iftFile*), _iftCmpFilesSortFileSet);
}
// ---------- iftFileSet.c end
// ---------- iftCSV.c start
/**
 * Heuristically decides whether the CSV file starts with a header row:
 * the first row is treated as a header when all its columns start with a
 * letter AND at least one column of the second row is a number.
 */
bool _iftHasCSVHeader(const char *csv_pathname, char separator) {
    bool has_header = false;

    FILE *fp = fopen(csv_pathname, "rb");
    if (fp == NULL)
        iftError(MSG_FILE_OPEN_ERROR, "_iftHasCSVHeader", csv_pathname);

    // reads the first line for checking if it is a CSV header
    char *line = iftGetLine(fp);
    if (line != NULL) {
        bool is_first_line_ok = true;
        char strSeparator[2] = {separator, '\0'};

        // if all columns of the first line start with a letter, the row can be a header
        iftSList *SL = iftSplitString(line, strSeparator);
        while (!iftIsSListEmpty(SL)) {
            char *column = iftRemoveSListHead(SL);
            // FIX: dropped the spurious <separator> vararg — the pattern has
            // no format specifiers
            if (!iftRegexMatch(column, "^[a-zA-Z]+.*$")) {
                is_first_line_ok = false;
                iftFree(column);
                break;
            }
            iftFree(column);
        }
        iftDestroySList(&SL);
        // FIX: the first line was leaked when it did not look like a header
        iftFree(line);
        line = NULL;

        if (is_first_line_ok) {
            line = iftGetLine(fp);
            if (line != NULL) {
                iftSList *SL2 = iftSplitString(line, strSeparator);
                iftFree(line);
                line = NULL;
                while (SL2->n != 0) {
                    char *column = iftRemoveSListHead(SL2);
                    // if at least one column of the second row is a number
                    // (integer or real), the first row is a header.
                    // FIX: escape the dot — a bare '.' matches ANY char, so
                    // e.g. "12x34" was wrongly accepted as a number
                    if (iftRegexMatch(column, "^[0-9]+(\\.[0-9]+)?$")) {
                        iftFree(column);
                        has_header = true;
                        break;
                    }
                    iftFree(column);
                }
                iftDestroySList(&SL2);
            }
        }
    }
    fclose(fp);

    return has_header;
}
/**
 * Counts the rows and columns of a CSV file, erroring out if any line has a
 * different number of columns than the first one.
 */
void _iftCountNumOfRowsAndColsFromCSVFile(const char *csv_pathname, long *nrows, long *ncols, char separator) {
    char strSeparator[2] = {separator, '\0'};

    FILE *fp = fopen(csv_pathname, "rb");
    if (fp == NULL)
        iftError(MSG_FILE_OPEN_ERROR, "_iftCountNumOfRowsAndColsFromCSVFile", csv_pathname);

    *nrows = 0;
    *ncols = 0;

    // the first line fixes the expected column count for the whole file
    char *line = iftGetLine(fp);
    if (line != NULL) {
        iftSList *SL = iftSplitString(line, strSeparator);
        (*nrows)++;
        *ncols = SL->n;
        iftDestroySList(&SL);
    }
    iftFree(line);

    // every remaining line must have exactly the same number of columns
    for (line = iftGetLine(fp); line != NULL; line = iftGetLine(fp)) {
        iftSList *SL = iftSplitString(line, strSeparator);
        if (*ncols != SL->n)
            iftError("Number of Columns is different in the lines: %d - %d",
                     "_iftCountNumOfRowsAndColsFromCSVFile", *ncols, SL->n);
        iftDestroySList(&SL);
        (*nrows)++;
        iftFree(line);
    }
    fclose(fp);
}
// Allocates an iftCSV whose cell matrix has the given shape, but whose cell
// strings are left unallocated for the caller to fill in.
iftCSV *_iftCreateCSVWithoutStringAllocation(long nrows, long ncols) {
    iftCSV *csv = (iftCSV*) iftAlloc(1, sizeof(iftCSV));
    csv->nrows = nrows;
    csv->ncols = ncols;

    // allocate the string matrix: one row of char* pointers per CSV row
    csv->data = (char***) iftAlloc(nrows, sizeof(char**));
    for (long r = 0; r < nrows; r++)
        csv->data[r] = (char**) iftAlloc(ncols, sizeof(char*));

    return csv;
}
/**
 * Reads a CSV file into an iftCSV. Each line is split on <separator>; if the
 * first line looks like a header (see _iftHasCSVHeader), it is stored in
 * csv->header and excluded from the data rows.
 */
iftCSV *iftReadCSV(const char *csv_pathname, const char separator) {
    if (!iftFileExists(csv_pathname))
        iftError("The CSV file pathname \"%s\" does not exists!", "iftReadCSV", csv_pathname);

    char strSeparator[2] = {separator, '\0'};

    bool has_header = _iftHasCSVHeader(csv_pathname, separator);

    // count rows/cols first so the cell matrix can be allocated in one shot
    long nrows, ncols;
    _iftCountNumOfRowsAndColsFromCSVFile(csv_pathname, &nrows, &ncols, separator);
    if (has_header)
        nrows--; // the header line is not a data row

    iftCSV *csv = _iftCreateCSVWithoutStringAllocation(nrows, ncols);

    FILE *fp = fopen(csv_pathname, "rb");
    if (fp == NULL)
        // FIX: the error message reported the wrong function name
        iftError(MSG_FILE_OPEN_ERROR, "iftReadCSV", csv_pathname);

    // copies the values from the CSV file
    iftSList *SL = NULL;
    char *line = iftGetLine(fp);

    if (has_header) {
        csv->header = iftAlloc(csv->ncols, sizeof(char*));

        SL = iftSplitString(line, strSeparator);
        for (long j = 0; j < csv->ncols; j++) {
            csv->header[j] = iftRemoveSListHead(SL); // just points to the string
            // removes the '\n' and '\r' from the cells
            iftRightTrim(csv->header[j], '\n');
            iftRightTrim(csv->header[j], '\r');
        }
        iftDestroySList(&SL);
        iftFree(line);
        line = iftGetLine(fp);
    }

    long i = 0;
    while (line != NULL) {
        SL = iftSplitString(line, strSeparator);
        for (long j = 0; j < csv->ncols; j++) {
            csv->data[i][j] = iftRemoveSListHead(SL); // just points to the string
            // removes the '\n' and '\r' from the cells
            iftRightTrim(csv->data[i][j], '\n');
            iftRightTrim(csv->data[i][j], '\r');
        }
        i++;
        iftDestroySList(&SL);
        iftFree(line);
        line = iftGetLine(fp);
    }
    fclose(fp);

    return csv;
}
/**
 * Destroys a CSV struct: every cell string, the cell matrix, the header (if
 * any), and the struct itself; sets *csv to NULL.
 */
void iftDestroyCSV(iftCSV **csv) {
    // FIX: guard against a NULL handle, matching the other destroyers in
    // this file (iftDestroyDir, iftDestroyFileSet)
    if (csv == NULL)
        return;

    iftCSV *csv_aux = *csv;
    if (csv_aux != NULL) {
        if (csv_aux->data != NULL) {
            // deallocates the CSV string matrix
            for (long i = 0; i < csv_aux->nrows; i++) {
                if (csv_aux->data[i] != NULL)
                    for (long j = 0; j < csv_aux->ncols; j++) {
                        iftFree(csv_aux->data[i][j]);
                    }
                iftFree(csv_aux->data[i]);
            }
            iftFree(csv_aux->data);
        }
        if (csv_aux->header != NULL) {
            for (int c = 0; c < csv_aux->ncols; c++)
                iftFree(csv_aux->header[c]);
            iftFree(csv_aux->header);
        }
        iftFree(csv_aux);
        *csv = NULL;
    }
}
// ---------- iftCSV.c end
//===========================================================================//
// ADDED BY FELIPE
//===========================================================================//
/**
 * Rescales the image intensities (and chroma channels, for color images) from
 * the current bit depth to <new_depth>, in place.
 */
void iftConvertNewBitDepth(iftImage **img, int new_depth)
{
#if IFT_DEBUG
    assert(img != NULL && *img != NULL);
    assert(new_depth > 0);
#endif
    iftImage *image = *img;

    // loop-invariant values hoisted out of the parallel loop
    const bool is_color = iftIsColorImage(image);
    const int old_depth = iftImageDepth(image);
    const int old_norm  = iftMaxImageRange(old_depth);
    const int new_norm  = iftMaxImageRange(new_depth);
    const float ratio   = new_norm / (float) old_norm;

#if IFT_OMP
    #pragma omp parallel for
#endif
    for (int p = 0; p < image->n; ++p)
    {
        iftColor rgb;
        if (is_color) {
            // convert each voxel to RGB at the old range
            iftColor old_ycbcr;
            old_ycbcr.val[0] = image->val[p];
            old_ycbcr.val[1] = image->Cb[p];
            old_ycbcr.val[2] = image->Cr[p];
            rgb = iftYCbCrtoRGB(old_ycbcr, old_norm);
        }
        else {
            rgb.val[0] = rgb.val[1] = rgb.val[2] = image->val[p];
        }

        // rescale and convert back at the new range
        rgb.val[0] *= ratio;  rgb.val[1] *= ratio;  rgb.val[2] *= ratio;
        iftColor new_ycbcr = iftRGBtoYCbCr(rgb, new_norm);

        image->val[p] = new_ycbcr.val[0];
        if (is_color) {
            image->Cb[p] = new_ycbcr.val[1];
            image->Cr[p] = new_ycbcr.val[2];
        }
    }
}
/**
 * Builds a bitmap marking every voxel whose label differs from at least one
 * of its unit-radius neighbors (border voxels of the label image).
 */
iftBMap *iftGetBorderMap
(const iftImage *label_img)
{
#if IFT_DEBUG
    assert(label_img != NULL);
#endif
    // unit-radius adjacency: spheric for 3D images, circular for 2D
    iftAdjRel *A = iftIs3DImage(label_img) ? iftSpheric(1.0) : iftCircular(1.0);
    iftBMap *border_map = iftCreateBMap(label_img->n);

#if IFT_OMP //-------------------------------------------------------------//
    #pragma omp parallel for
#endif //------------------------------------------------------------------//
    for (int p = 0; p < label_img->n; ++p)
    {
        iftVoxel p_vxl = iftGetVoxelCoord(label_img, p);

        // p is a border voxel iff some valid neighbor carries another label
        for (int i = 0; i < A->n; ++i)
        {
            iftVoxel adj_vxl = iftGetAdjacentVoxel(A, p_vxl, i);
            if (iftValidVoxel(label_img, adj_vxl))
            {
                int adj_idx = iftGetVoxelIndex(label_img, adj_vxl);
                if (label_img->val[p] != label_img->val[adj_idx])
                {
                    iftBMapSet1(border_map, p);
                    break;
                }
            }
        }
    }
    iftDestroyAdjRel(&A);

    return border_map;
}
VarVerletTraversalAsBuild.h | /**
* @file VarVerletTraversalAsBuild.h
* @author humig
* @date 21.05.19
*/
#pragma once
#include "autopas/containers/verletListsCellBased/verletLists/neighborLists/asBuild/VerletNeighborListAsBuild.h"
#include "autopas/containers/verletListsCellBased/verletLists/traversals/VarVerletTraversalInterface.h"
#include "autopas/options/TraversalOption.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* Traversal for VarVerletLists with VerletNeighborListAsBuild as neighbor list. Every particle pair will be processed
* by the same thread in the same color as during the build of the neighbor list.
*
* @tparam ParticleCell
* @tparam Particle The particle type used by the neighbor list.
* @tparam PairwiseFunctor The type of the functor to use for the iteration.
* @tparam dataLayout The data layout to use.
* @tparam useNewton3 Whether or not this traversal uses newton 3.
*/
template <class ParticleCell, class Particle, class PairwiseFunctor, DataLayoutOption::Value dataLayout,
          bool useNewton3>
class VarVerletTraversalAsBuild : public VarVerletTraversalInterface<VerletNeighborListAsBuild<Particle>>,
                                  public TraversalInterface {
 private:
  /**
   * Internal iterate method for AoS.
   * @param neighborList The neighbor list to iterate over.
   */
  void iterateAoS(VerletNeighborListAsBuild<Particle> &neighborList);

  /**
   * Internal iterate method for SoA.
   * @param neighborList The neighbor list to iterate over.
   */
  void iterateSoA(VerletNeighborListAsBuild<Particle> &neighborList);

 public:
  /**
   * The Constructor of VarVerletTraversalAsBuild.
   * @param pairwiseFunctor The functor to use for the iteration.
   */
  explicit VarVerletTraversalAsBuild(PairwiseFunctor *pairwiseFunctor) : _functor(pairwiseFunctor), _soa{nullptr} {}

  /**
   * @return Whether this traversal uses newton 3 (fixed by the template parameter).
   */
  bool getUseNewton3() const override { return useNewton3; }

  /**
   * @return The data layout of this traversal (fixed by the template parameter).
   */
  DataLayoutOption getDataLayout() const override { return dataLayout; }

  /**
   * Loads the particles into the SoA buffer via the functor when the SoA data
   * layout is used; no-op for AoS.
   */
  void initTraversal() override {
    auto &neighborList = *(this->_neighborList);
    if (dataLayout == DataLayoutOption::soa) {
      _soa = neighborList.loadSoA(_functor);
    }
  }

  /**
   * Writes the SoA buffer back into the particles when the SoA data layout is
   * used; no-op for AoS.
   */
  void endTraversal() override {
    auto &neighborList = *(this->_neighborList);
    if (dataLayout == DataLayoutOption::soa) {
      neighborList.extractSoA(_functor);
      _soa = nullptr;
    }
  }

  /**
   * Iterates over all particle pairs of the neighbor list, dispatching to the
   * AoS or SoA implementation according to the data layout.
   */
  void traverseParticlePairs() override {
    auto &neighborList = *(this->_neighborList);
    switch (dataLayout) {
      case DataLayoutOption::aos:
        iterateAoS(neighborList);
        break;
      case DataLayoutOption::soa:
        iterateSoA(neighborList);
        break;
      default:
        autopas::utils::ExceptionHandler::exception("VarVerletTraversalAsBuild does not know this data layout!");
    }
  }

  /**
   * @return True for the two supported data layouts (aos and soa).
   */
  bool isApplicable() const override {
    return dataLayout == DataLayoutOption::soa || dataLayout == DataLayoutOption::aos;
  }

  /**
   * @return The traversal option identifying this traversal type.
   */
  TraversalOption getTraversalType() const override { return TraversalOption::varVerletTraversalAsBuild; }

 private:
  /**
   * The functor to use for the iteration.
   */
  PairwiseFunctor *_functor;

  /**
   * A pointer to the SoA to iterate over if DataLayout is soa.
   */
  SoA<typename Particle::SoAArraysType> *_soa;
};
template <class ParticleCell, class Particle, class PairwiseFunctor, DataLayoutOption::Value dataLayout,
          bool useNewton3>
void VarVerletTraversalAsBuild<ParticleCell, Particle, PairwiseFunctor, dataLayout, useNewton3>::iterateAoS(
    VerletNeighborListAsBuild<Particle> &neighborList) {
  const auto &list = neighborList.getInternalNeighborList();
  // Colors are processed one after another; within a color, each thread works
  // on the per-thread map it owned during the neighbor-list build.
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel num_threads(list[0].size())
#endif
  {
    constexpr int numColors = 8;
    for (int color = 0; color < numColors; ++color) {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(static)
#endif
      for (unsigned int threadIndex = 0; threadIndex < list[color].size(); ++threadIndex) {
        for (const auto &[particle, neighbors] : list[color][threadIndex]) {
          for (auto *neighbor : neighbors) {
            _functor->AoSFunctor(*particle, *neighbor, useNewton3);
          }
        }
      }
    }
  }
}
template <class ParticleCell, class Particle, class PairwiseFunctor, DataLayoutOption::Value dataLayout,
          bool useNewton3>
void VarVerletTraversalAsBuild<ParticleCell, Particle, PairwiseFunctor, dataLayout, useNewton3>::iterateSoA(
    VerletNeighborListAsBuild<Particle> &neighborList) {
  const auto &soaNeighborList = neighborList.getInternalSoANeighborList();
  // Same color-by-color scheme as the AoS path, but each thread hands its
  // whole per-thread neighbor list to the SoA functor at once.
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel num_threads(soaNeighborList[0].size())
#endif
  {
    constexpr int numColors = 8;
    for (int color = 0; color < numColors; ++color) {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(static)
#endif
      for (unsigned int threadIndex = 0; threadIndex < soaNeighborList[color].size(); ++threadIndex) {
        const auto &perThreadList = soaNeighborList[color][threadIndex];
        _functor->SoAFunctor(*_soa, perThreadList, 0, perThreadList.size(), useNewton3);
      }
    }
  }
}
|
GB_binop__times_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_uint32
// A.*B function (eWiseMult): GB_AemultB__times_uint32
// A*D function (colscale): GB_AxD__times_uint32
// D*A function (rowscale): GB_DxB__times_uint32
// C+=B function (dense accum): GB_Cdense_accumB__times_uint32
// C+=b function (dense accum): GB_Cdense_accumb__times_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_uint32
// C=scalar+B GB_bind1st__times_uint32
// C=scalar+B' GB_bind1st_tran__times_uint32
// C=A+scalar GB_bind2nd__times_uint32
// C=A'+scalar GB_bind2nd_tran__times_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij * bij)
// types of the A, B, and C matrices for this TIMES/uint32 specialization
#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB)  \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x * y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; specialized for the uint32 TIMES
// operator via the GB_* macros above. All work is done by the template.
void GB_Cdense_ewise3_accum__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. Returns GrB_NO_VALUE when this
// specialization is compiled out (GB_DISABLE), so the caller falls back to
// the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the work
// pre-sliced into tasks via the k/pstart slice arrays.
GrB_Info GB_Cdense_accumB__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,    // first vector of each task
    const int64_t *GB_RESTRICT klast_slice,     // last vector of each task
    const int64_t *GB_RESTRICT pstart_slice,    // entry offset of each task
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__times_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // the scalar b, typeless
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned (auto-generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale the matrix A by the diagonal matrix D.
GrB_Info GB_AxD__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // pattern-only: values unused
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale the matrix B by the diagonal matrix D.
GrB_Info GB_DxB__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // pattern-only: values unused
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// free the per-matrix slice workspaces allocated by the add template
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

// eWiseAdd: C = A+B or C<M> = A+B, with optional (possibly complemented) mask M.
GrB_Info GB_AaddB__times_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only the structure of M, not its values
    const bool Mask_comp,       // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, allocated (if needed) inside the template and
    // released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with optional (possibly complemented) mask M.
GrB_Info GB_AemultB__times_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // use only the structure of M, not its values
    const bool Mask_comp,       // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, allocated (if needed) inside the template and
    // released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for every entry present in the bitmap Bb (bind the
// scalar to the first operand).
GrB_Info GB_bind1st__times_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (GBB (Bb, p))
        {
            Cx [p] = (x * Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for every entry present in the bitmap Ab (bind the
// scalar to the second operand).
GrB_Info GB_bind2nd__times_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x * aij) ;           \
}

// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first operand.
GrB_Info GB_bind1st_tran__times_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for the rest of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP for the transpose template: cij = aij * y,
// no typecasting (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    uint32_t aij = Ax [pA] ;  \
    Cx [pC] = (aij * y) ;     \
}

// GB_bind2nd_tran__times_uint32: C = op (A', y), i.e. transpose A and apply
// uint32_t TIMES with the scalar y bound to the 2nd argument.  The transpose
// loop is supplied by the #include'd GB_unop_transpose.c template.
GrB_Info GB_bind2nd_tran__times_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // points to a single uint32_t scalar
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gridsearch.h | #ifndef TUNING_H
#define TUNING_H
// TODO: change after integrate with module
// import from Cosan
// TODO #include<> cross validate here
// standard library / OpenMP headers used in this file
#include <algorithm>   // std::min_element
#include <limits>      // std::numeric_limits
#include <vector>      // std::vector
#include <omp.h>       // omp_set_num_threads, omp_get_max_threads
// project headers
#include <cosan/selection/crossvalidation.h>
#include <cosan/selection/selection.h>
namespace Cosan
{
/**
* Hyperparameter tuning for supervised models that have one or more hyperparameter(s) to tune
* Input required:
* estimator: class Model&, a model whose hyperparameters need to be tuned;
* metric: class Metric&, a metric to use in cross-validation
* split: Split & splitter method;*
* paramGrid: a vector of hyperparameters combination. For each entry of the vector, it corresponds to one choice of hyperparameter combination;
* Ouput: call .GetBestParams() to get the best hyperparameters combination.
* the choice of the hyper-parameter in paramGrid that forms the most accurate model
**/
template<Numeric NumericType,
         Derived<CosanModel> Model,
         Derived<CosanMetric<NumericType>> Metric,
         Derived<Splitter> Split>
class GridSearch: public Search{
    public:
        GridSearch() = delete;
        /// Exhaustively cross-validates every candidate in paramGrid and
        /// remembers the candidate that produced the smallest error.
        GridSearch( CosanData<NumericType> &CRD,
                    Model & estimator,
                    Metric & metric,
                    Split & split,
                    const std::vector<NumericType> & paramGrid): Search() {
            NumericType lowestSeen = std::numeric_limits<NumericType>::infinity();
            for (const auto & candidate : paramGrid){
                estimator.SetParams(candidate);
                const NumericType err = crossValidation(CRD, estimator, metric, split);
                if (err < lowestSeen){
                    lowestSeen = err;
                    bestParam = candidate;
                }
            }
        }
        /// Best hyper-parameter value found during construction.
        auto GetBestParams(){return bestParam;}
    private:
        NumericType bestParam;
};
/**
* @details parallel version for grid search
* Hyperparameter tuning for supervised models that have one or more hyperparameter(s) to tune
* Input required:
* estimator: class Model&, a model whose hyperparameters need to be tuned;
* metric: class Metric&, a metric to use in cross-validation
* split: Split & splitter method;*
* paramGrid: a vector of hyperparameters combination. For each entry of the vector, it corresponds to one choice of hyperparameter combination;
* nthreads: int. number of threads to be used for parallel computing.
* Ouput: call .GetBestParams() to get the best hyperparameters combination.
* the choice of the hyper-parameter in paramGrid that forms the most accurate model
**/
template<Numeric NumericType,
         Derived<CosanModel> Model,
         Derived<CosanMetric<NumericType>> Metric,
         Derived<Splitter> Split>
class GridSearchParallel: public Search{
    public:
        GridSearchParallel() = delete;
        /**
         * @param CRD        data set used for cross-validation
         * @param estimator  model to tune; used as a prototype and copied per
         *                   candidate (Model must be copy-constructible)
         * @param metric     error metric minimised by the search
         * @param split      splitter defining the cross-validation folds
         * @param paramGrid  candidate hyper-parameter values
         * @param nthreads   OpenMP thread count; -1 means use the maximum
         */
        GridSearchParallel( CosanData<NumericType> &CRD,
                            Model & estimator,
                            Metric & metric,
                            Split & split,
                            const std::vector<NumericType> & paramGrid, int nthreads = -1): Search() {
            if (paramGrid.empty())
                return;  // nothing to search; bestParam stays default-constructed
            std::vector<NumericType> allError(paramGrid.size());
            omp_set_num_threads(nthreads == -1 ? omp_get_max_threads() : nthreads);
            // FIX: the previous version called estimator.SetParams() on the
            // single shared estimator from every thread -- a data race that
            // could cross-validate the wrong parameter.  Each iteration now
            // works on its own private copy of the model.
            // NOTE(review): assumes crossValidation() does not mutate
            // CRD/metric/split in a thread-unsafe way -- confirm.
            #pragma omp parallel for
            for (gsl::index i = 0; i < static_cast<gsl::index>(paramGrid.size()); ++i){
                Model localModel = estimator;       // per-thread copy: no race
                localModel.SetParams(paramGrid[i]);
                allError[i] = crossValidation(CRD, localModel, metric, split);
            }
            bestParam = paramGrid[std::distance(allError.begin(),
                std::min_element(allError.begin(), allError.end()))];
        }
        /// Best hyper-parameter value found during construction.
        auto GetBestParams(){return bestParam;}
    private:
        NumericType bestParam;
};
// template<typename NumericType,
// Derived<CosanModel> Model,
// Derived<CosanMetric<NumericType>> Metric,
// Derived<Splitter> Split,
// typename = typename std::enable_if<std::is_arithmetic<NumericType>::value,NumericType>::type>
template<Numeric NumericType,
         Derived<CosanModel> Model,
         Derived<CosanMetric<NumericType>> Metric,
         Derived<Splitter> Split>
class GridSearchMulti: public Search{
    public:
        GridSearchMulti() = delete;
        /// Exhaustive search over multi-valued hyper-parameter combinations:
        /// each entry of paramGrid is one combination (a vector of values);
        /// the combination with the lowest cross-validation error wins.
        GridSearchMulti( CosanData<NumericType> &CRD,
                         Model & estimator,
                         Metric & metric,
                         Split & split,
                         const std::vector<std::vector<NumericType>> & paramGrid): Search() {
            NumericType lowestSeen = std::numeric_limits<NumericType>::infinity();
            for (const auto & candidate : paramGrid){
                estimator.SetParams(candidate);
                const NumericType err = crossValidation(CRD, estimator, metric, split);
                if (err < lowestSeen){
                    lowestSeen = err;
                    bestParam = candidate;
                }
            }
        }
        /// Best hyper-parameter combination found during construction.
        auto GetBestParams(){return bestParam;}
    private:
        std::vector<NumericType> bestParam;
};
// template<typename NumericType,
// Derived<CosanModel> Model,
// Derived<CosanMetric<NumericType>> Metric,
// Derived<Splitter> Split,
// typename = typename std::enable_if<std::is_arithmetic<NumericType>::value,NumericType>::type>
template<Numeric NumericType,
         Derived<CosanModel> Model,
         Derived<CosanMetric<NumericType>> Metric,
         Derived<Splitter> Split>
class GridSearchMultiParallel: public Search{
    public:
        GridSearchMultiParallel() = delete;
        /**
         * Parallel exhaustive search over multi-valued hyper-parameter
         * combinations (one std::vector<NumericType> per candidate).
         *
         * @param CRD        data set used for cross-validation
         * @param estimator  model to tune; used as a prototype and copied per
         *                   candidate (Model must be copy-constructible)
         * @param metric     error metric minimised by the search
         * @param split      splitter defining the cross-validation folds
         * @param paramGrid  candidate hyper-parameter combinations
         * @param nthreads   OpenMP thread count; -1 means use the maximum
         */
        GridSearchMultiParallel( CosanData<NumericType> &CRD,
                                 Model & estimator,
                                 Metric & metric,
                                 Split & split,
                                 const std::vector<std::vector<NumericType>> & paramGrid,
                                 int nthreads = -1): Search() {
            if (paramGrid.empty())
                return;  // nothing to search; bestParam stays default-constructed
            std::vector<NumericType> allError(paramGrid.size());
            omp_set_num_threads(nthreads == -1 ? omp_get_max_threads() : nthreads);
            // FIX: the previous version mutated the single shared estimator
            // via SetParams() from every thread -- a data race.  Each
            // iteration now works on its own private copy of the model.
            // NOTE(review): assumes crossValidation() does not mutate
            // CRD/metric/split in a thread-unsafe way -- confirm.
            #pragma omp parallel for
            for (gsl::index i = 0; i < static_cast<gsl::index>(paramGrid.size()); ++i){
                Model localModel = estimator;       // per-thread copy: no race
                localModel.SetParams(paramGrid[i]);
                allError[i] = crossValidation(CRD, localModel, metric, split);
            }
            bestParam = paramGrid[std::distance(allError.begin(),
                std::min_element(allError.begin(), allError.end()))];
        }
        /// Best hyper-parameter combination found during construction.
        auto GetBestParams(){return bestParam;}
    private:
        std::vector<NumericType> bestParam;
};
}
#endif |
Critical.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define TOTAL 500000000
/* Monte-Carlo estimate of pi: throw TOTAL random points into the unit
 * square and count those landing inside the quarter circle.
 * argv[1]: number of OpenMP threads.
 * Returns 1 on bad usage, 0 otherwise; the estimate itself is only
 * printed if the (deliberately commented-out) printf is re-enabled. */
int main(int argc, char* argv[]){
    if (argc != 2) return 1;
    const int threads_num = atoi(argv[1]);
    /* atoi returns 0 on garbage input; num_threads(0) is invalid */
    if (threads_num < 1) return 1;
    long sum = 0;
    #pragma omp parallel num_threads(threads_num)
    {
        long localsum = 0;
        /* FIX: seeding with time(NULL) alone gives every thread the SAME
         * seed (they all start within the same second), so all threads
         * would draw identical, fully correlated sample streams.  XOR in
         * the thread id to decorrelate them. */
        unsigned int seed = (unsigned int) time(NULL)
                            ^ (unsigned int) omp_get_thread_num();
        #pragma omp for
        for (long i = 0; i < TOTAL; i++){
            float x = rand_r(&seed) / (float) RAND_MAX;
            float y = rand_r(&seed) / (float) RAND_MAX;
            /* sqrt is monotonic on non-negative inputs, so compare the
             * squared distance directly -- same predicate, no sqrt call */
            if (x*x + y*y < 1.0f)
                localsum++;
        }
        /* serialize the accumulation of the per-thread partial counts */
        #pragma omp critical
        sum += localsum;
    }
    //printf("Pi is : %f\n",4*sum/(float)TOTAL);
    (void) sum; /* result currently unused while the printf is commented out */
    return 0;
}
|
tsv.c | /* This code is part of this project: Donato E, Ouyang M,
* Peguero-Isalguez C. Triangle counting with a multi-core computer.
* Proceedings of IEEE High Performance Extreme Computing Conference
* (HPEC), 2018, 1-7.
*
* Copyright (c) 2018 Ming Ouyang
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <omp.h>
#include "ompTri.h"
/* read a tsv file
* store the graph in degree[n] and neighbor[n][]
*
* tsv format: one edge per line, each line is 3 numbers separated by 2 tabs
* "u\tv\tedgeWeight\n"
* vertex numbers, u and v, start at 1
*/
/* Read a tsv file ("u\tv\tweight\n" per line, vertices numbered from 1)
 * and store the directed graph in the globals degree[] and neighbor[][].
 * The globals numT (thread count), verbose, n (max vertex number), degree,
 * neighbor and the helper str2u64 come from elsewhere -- presumably
 * ompTri.h; confirm against that header.  Parsing runs in three parallel
 * passes: (1) count tokens per thread chunk, (2) record each token's byte
 * offset, (3) convert tokens to integers with a max-reduction over n. */
void readTSV(char *filename) {
  uint64_t i, j, *off, numItem, *myNumItem, chunk, start, end;
  uint64_t *rawNum, u, v;
  struct stat buf;
  char *buffer;
  int status;
  FILE *fp;

  status = stat(filename, &buf);
  if (status) {
    printf("no such file: %s\n", filename);
    exit(0); // NOTE(review): exits with status 0 (success) on error -- confirm intended
  }
  if (verbose)
    printf("file has %lu bytes, ", buf.st_size);
  buffer = (char*) malloc(buf.st_size); // NOTE(review): malloc/fread results unchecked
  myNumItem = (uint64_t*) malloc(sizeof(uint64_t) * (numT + 1));
  for (i = 0; i <= numT; i++) //prefix sum later, need one extra element
    myNumItem[i] = 0;
  chunk = buf.st_size / numT; // bytes per thread; last thread takes the remainder
  //grab the whole file
  fp = fopen(filename, "rb");
  fread((void*) buffer, 1, buf.st_size, fp);
  fclose(fp);
  //count how many numbers are in the file
  // every number is terminated by '\t' or '\n', so counting terminators in a
  // chunk counts the numbers that END inside that chunk
  #pragma omp parallel for private(j,start,end)
  for (i = 0; i < numT; i++) {
    start = i * chunk;
    end = (i == numT - 1) ? buf.st_size : start + chunk;
    for (j = start; j < end; j++)
      if (buffer[j] == '\t' || buffer[j] == '\n')
        myNumItem[i + 1]++; //note (i + 1), shift by one
  }
  for (i = 0; i < numT; i++) //prefix sum
    myNumItem[i + 1] += myNumItem[i];
  numItem = myNumItem[numT]; //number of numbers in the file
  off = (uint64_t*) malloc(sizeof(uint64_t) * (numItem + 1));
  rawNum = (uint64_t*) malloc(sizeof(uint64_t) * numItem);
  // off[k] will hold the byte offset where number k begins: number 0 starts
  // at offset 0, and a terminator at position j starts the NEXT number at
  // j + 1.  The off += 1 / off -= 1 shift lets each thread write "start of
  // the number after terminator k" at slot k + 1 while reusing the prefix
  // sums in myNumItem[] as disjoint per-thread write cursors (no race).
  off[0] = 0;
  off += 1;
  //locate the beginning of each number in the file
  #pragma omp parallel for private(j,start,end)
  for (i = 0; i < numT; i++) {
    start = i * chunk;
    end = (i == numT - 1) ? buf.st_size : start + chunk;
    for (j = start; j < end; j++)
      if (buffer[j] == '\t' || buffer[j] == '\n')
        off[ myNumItem[i]++ ] = j + 1;
  }
  off -= 1;
  n = 0; //for max reduction
  // convert every token and find the largest value seen; NOTE(review): this
  // maximum includes the edge-weight column, so n is only the vertex count
  // if weights never exceed it -- confirm for the input format used
  #pragma omp parallel for reduction(max:n)
  for (i = 0; i < numItem; i++) {
    rawNum[i] = str2u64( &buffer[ off[i] ]);
    n = (n < rawNum[i]) ? rawNum[i] : n;
  }
  free(off);
  free(myNumItem);
  free(buffer);
  degree = (uint64_t*) malloc(sizeof(uint64_t) * n);
  neighbor = (uint64_t**)malloc(sizeof(uint64_t*) * n);
  for (i = 0; i < n; i++)
    degree[i] = 0;
  //vertex numbers in a tsv file start at 1
  // first pass over the triples: count out-degrees (source vertex only)
  for (i = 0; i < numItem; i += 3)
    degree[rawNum[i] - 1]++; //shift to 0-based indexing
  for (i = 0; i < n; i++) {
    if (degree[i])
      neighbor[i] = (uint64_t*) malloc(sizeof(uint64_t) * degree[i]);
    else
      neighbor[i] = NULL;
    degree[i] = 0; // reset; reused below as the fill cursor per vertex
  }
  // second pass: store each directed edge u -> v, skipping self-loops
  // (a skipped self-loop leaves degree[u] below the allocated capacity)
  for (i = 0; i < numItem; i += 3) {
    u = rawNum[i] - 1;
    v = rawNum[i + 1] - 1;
    if (u == v) {
      fprintf(stderr, "self-loop: i %lu, u %lu\n", i, u);
      continue;
    }
    neighbor[u] [degree[u]++] = v;
  }
  free(rawNum);
  if (verbose)
    printf("n %lu, m %lu\n", n, numItem / 3 >> 1); //m is not set yet
}
//memory not freed: degree, neighbor, neighbor[*]
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented;

  /*
    Produce a top-left oriented copy of the image by undoing the given
    EXIF-style orientation; unrecognized or already top-left orientations
    yield a plain clone.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (orientation == TopRightOrientation)
    oriented=FlopImage(image,exception);
  else if (orientation == BottomRightOrientation)
    oriented=RotateImage(image,180.0,exception);
  else if (orientation == BottomLeftOrientation)
    oriented=FlipImage(image,exception);
  else if (orientation == LeftTopOrientation)
    oriented=TransposeImage(image,exception);
  else if (orientation == RightTopOrientation)
    oriented=RotateImage(image,90.0,exception);
  else if (orientation == RightBottomOrientation)
    oriented=TransverseImage(image,exception);
  else if (orientation == LeftBottomOrientation)
    oriented=RotateImage(image,270.0,exception);
  else  /* UndefinedOrientation, TopLeftOrientation, or anything else */
    oriented=CloneImage(image,0,0,MagickTrue,exception);
  if (oriented != (Image *) NULL)
    oriented->orientation=TopLeftOrientation;
  return(oriented);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info)
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds; negative offsets shrink
    the rectangle and are then reset to 0.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The result image is the original minus the chopped rows and columns.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first, copy the rows ABOVE the chop rectangle
    (y < extent.y), skipping the chopped column range within each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* keep only pixels outside the chopped column range */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows BELOW the chop rectangle, mapping
    source row extent.y+extent.height+y to destination row extent.y+y.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* keep only pixels outside the chopped column range */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImage method is:
%
% Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.  Each
    group of 4 consecutive images in the list supplies one CMYK result:
    the inverted intensity (QuantumRange - intensity) of plane 1 becomes
    the red (C) channel, plane 2 the green (M), plane 3 the blue (Y), and
    plane 4 the index (K) channel.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    /*
      Plane 1 (C -> red channel); the destination pixels are freshly queued.
    */
    cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 2 (M -> green channel); existing pixels are read-modify-written.
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 3 (Y -> blue channel).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Plane 4 (K -> index channel, stored via the colormap index queue).
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
          GetPixelIntensity(images,p)));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The bounding box is the virtual canvas (page) if one is set, otherwise
    the pixel extent of the image itself.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the requested region from virtual-canvas coordinates into
    pixel coordinates and clip it to the actual pixel data.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: each destination row y is a straight memory copy of the
    source row page.y+y starting at column page.x.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict crop_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
%  The format of the CropImageToTiles method is:
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image to crop.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Crop a single image into a (possibly one-element) list of tiles as
    directed by a crop geometry string.  Three behaviors, chosen by the
    parsed geometry flags:
      1. '@' (AreaValue): divide the image into NxM tiles.
      2. An explicit +X+Y offset (or zero WxH): crop one sub-region.
      3. A plain WxH smaller than the image: cut fixed-size WxH tiles.
    Anything else returns a clone of the input image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width/height hold the tile
        counts here, not pixel sizes.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /*
        Without '!' (AspectValue) the X/Y offsets shrink the area to tile;
        with '!' they grow it.  The ternaries take the absolute value.
      */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        delta is the floating-point tile pitch; clamp to at least one pixel
        so the loops below always make progress.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;  /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        /* crop.height currently holds the tile's bottom edge; convert to a
           true height, then translate into page coordinates. */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          /* Same right-edge -> width conversion as the vertical axis. */
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /* Individual tile crops may have raised warnings; drop them since at
         least some tiles were produced. */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          /* '!' flag: reset the virtual page to the crop geometry. */
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, walking the virtual page.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        /* A failed tile aborts the whole grid; partial list is returned. */
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* Geometry covers the whole image: nothing to crop, return a clone. */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  Unlike CropImage(), ExcerptImage() performs no
    page-geometry bookkeeping: it copies the raw pixel rectangle at
    (geometry->x, geometry->y) of size geometry->width x geometry->height.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.  Rows are independent, so the loop is parallelized;
    any per-row failure sets 'status' and the remaining rows short-circuit.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict excerpt_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    /* NOTE(review): QueueCacheViewAuthenticPixels would avoid reading the
       destination row we are about to overwrite wholesale — confirm intent. */
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /* NOTE(review): image_view is a *virtual* view but is queried with the
       Authentic index queue accessor — confirm against the library's other
       transforms, which pair virtual views with the Virtual accessor. */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        /* Colormapped/CMYK images carry a parallel index channel; copy it. */
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Extend (or shrink) the canvas to geometry->width x geometry->height,
    filling new area with the background color and compositing the source
    at (-geometry->x, -geometry->y).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Identity extent: same size, no offset — a plain clone suffices.
  */
  if ((geometry->x == 0) && (geometry->y == 0) &&
      (image->columns == geometry->width) &&
      (image->rows == geometry->height))
    return(CloneImage(image,0,0,MagickTrue,exception));
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageBackgroundColor(extent_image);
  (void) CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Create a vertical mirror: source row y becomes destination row
    (rows - y - 1).  Pixels within a row keep their order.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flip_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* Destination row is the mirror of the source row. */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Whole-row copy: pixel order within the row is unchanged by a flip. */
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        /* Mirror the colormap/CMYK index channel alongside the pixels. */
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the vertical page offset so virtual-canvas placement flips too. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Create a horizontal mirror: each row is written back in reverse pixel
    order; rows themselves stay in place.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start q one past the row's end; the loop pre-decrements it so pixels
       are stored right-to-left while p walks left-to-right. */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      /* The index channel is mirrored by explicit positioning rather than
         pointer walking. */
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the horizontal page offset so virtual-canvas placement flops. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows block of pixels (and, when
  present, the colormap/CMYK index channel) from 'source' at (sx,sy) to
  'destination' at (dx,dy).  Helper for RollImage()'s quadrant moves.
  Returns MagickFalse if any scanline fails to transfer.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* A zero-width region is a no-op (rows==0 falls through the loop). */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict destination_indexes;

    register PixelPacket
      *magick_restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        /* Copy the parallel index channel for pseudo-color/CMYK images. */
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.  Rolling shifts the image by
    (x_offset, y_offset) with wrap-around; it is implemented as four
    rectangular region copies (the four quadrants split at the offset).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /* Normalize the offsets into [0, columns) x [0, rows). */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image.
  */
  /* Bottom-right corner of the source wraps to the top-left. */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  /* Bottom-left corner wraps to the top-right. */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  /* Top-right corner wraps to the bottom-left. */
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  /* Top-left corner shifts to the bottom-right. */
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop_geometry;

  /*
    Shave shave_info->width columns off each side and shave_info->height
    rows off the top and bottom by delegating to CropImage(), then shrink
    the virtual page to match.  Throws OptionWarning when the shave would
    consume the whole image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&crop_geometry);
  crop_geometry.x=(ssize_t) shave_info->width+image->page.x;
  crop_geometry.y=(ssize_t) shave_info->height+image->page.y;
  crop_geometry.width-=2*shave_info->width;
  crop_geometry.height-=2*shave_info->height;
  shave_image=CropImage(image,&crop_geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    columns,
    y;

  /*
    Allocate splice image: the canvas grows by the splice width/height, and
    the band at (splice_geometry.x, splice_geometry.y) of that size is left
    as background color while the original pixels are shifted around it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&splice_image->exception);
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  (void) SetImageBackgroundColor(splice_image);
  /*
    Respect image gravity: shift the splice band toward the gravity point.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: west gravity centers vertically, so the y offset must be
        height/2; it previously added width/2, misplacing the splice band
        (compare EastGravity below, which correctly uses height/2).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case StaticGravity:
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.  First loop: the rows above the horizontal splice band.
    Rows inside [splice_geometry.y, splice_geometry.y+height) are skipped
    entirely and keep the background color set above.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes,
      *magick_restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    /* Pixels left of the vertical splice band. */
    for (x=0; x < columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* Skip the spliced columns; they keep the background color. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    /* Pixels right of the vertical splice band. */
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Fixed critical-section name (was MagickCore_TransposeImage,
           a copy-paste from TransposeImage). */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Second loop: the rows below the horizontal splice band; the source row
    is offset back up by the splice height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,1)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes,
      *magick_restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    if ((y < 0) || (y >= (ssize_t)splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
  This function should probably be deprecated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Crop and/or resize *image in place.  The original image is destroyed
    and replaced; see the DANGER note above — if *image is part of a list,
    the rest of the list is lost, and when cropping yields multiple tiles
    only the first is resized.
  */
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.  On crop failure the original
        is replaced by a clone of itself (the exception records why).
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  /* Already at the target size: nothing to do. */
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *current,
    **frames,
    *result;

  MagickStatusType
    status;

  ssize_t
    n;

  /*
    Apply TransformImage() to every frame of the sequence, rebuilding the
    list from the (possibly replaced) per-frame handles.
  */
  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickCoreSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  frames=ImageListToArray(*images,&(*images)->exception);
  if (frames == (Image **) NULL)
    return(MagickFalse);  /* could not flatten the list into an array */
  result=NewImageList();
  status=MagickTrue;
  n=0;
  while (frames[n] != (Image *) NULL)
    {
      current=frames[n];
      status&=TransformImage(&current,crop_geometry,image_geometry);
      AppendImageToList(&result,current);
      n++;
    }
  *images=result;
  frames=(Image **) RelinquishMagickMemory(frames);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result canvas swaps the source's columns and rows.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row is written as one destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transpose_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another iteration already failed; skip remaining work */
    /*
      Read source row (rows-y-1); queue destination column (rows-y-1), a
      1-pixel-wide, rows-tall region of the transposed image.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    /*
      Copy the colormap indexes, if any, into the destination column.
      NOTE(review): the source view is virtual but is queried with
      GetCacheViewAuthenticIndexQueue - confirm this is intended.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* the progress counter is shared across threads; serialize it */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the virtual canvas (page) geometry to match the rotated axes.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result canvas swaps the source's columns and rows.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transverse_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another iteration already failed; skip remaining work */
    /*
      Source row y becomes destination column (rows-y-1), with the pixel
      order within the row reversed.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    q+=image->columns;  /* copy backwards to reverse the pixel order */
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    /*
      Mirror the colormap indexes, if any, into the destination column.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* the progress counter is shared across threads; serialize it */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry for the rotated axes, then mirror the page
    offsets so the canvas placement follows the flip.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *crop_image;

  RectangleInfo
    bounds;

  /*
    Trim edge pixels: crop the image to the bounding box of its
    non-background content.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /* shift into page coordinates, then crop to the bounding box */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate case (no content found): return a 1x1 transparent image
    whose page offset marks it as fully trimmed.
  */
  crop_image=CloneImage(image,1,1,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(crop_image);
  crop_image->page=image->page;
  crop_image->page.x=(-1);
  crop_image->page.y=(-1);
  return(crop_image);
}
|
header.h | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// header.h
//
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// The following include file is generated automatically by the
// "setparams" utility. It defines
// maxcells: the square root of the maximum number of processors
// problem_size: 12, 64, 102, 162 (for class T, A, B, C)
// dt_default: default time step for this problem size if no
// config file
// niter_default: default number of iterations for this problem size
//---------------------------------------------------------------------
#include "npbparams.h"
#include "type.h"
#include <stdio.h>
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* common /global/ */
extern double elapsed_time;
#pragma omp declare target
extern int grid_points[3], nx, ny, nz;
#pragma omp end declare target
extern int timeron;
/* common /constants/ */
extern double tx3, ty1, ty3, tz1, tz3,
ce[5][13], xxcon1,
yycon1,
zzcon1,
dnxm1, dnym1,
dnzm1, conz1,
c3, c4, c5, c4dssp, c5dssp, dtdssp,
c3c4tx3, c3c4ty3, c3c4tz3, con16;
#pragma omp declare target
extern double tx1, tx2, ty2, tz2,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
dxmax, dymax, dzmax, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1,
c1c2, c1c5, c3c4, c1345, c1, c2,
dttx1,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c2iv, con43;
#pragma omp end declare target
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
#define IMAXP IMAX/2*2
#define JMAXP JMAX/2*2
// to improve cache performance, grid dimensions padded by 1
// for even number sizes only.
/* common /fields/ */
#pragma omp declare target
extern double us [KMAX][JMAXP+1][IMAXP+1];
extern double vs [KMAX][JMAXP+1][IMAXP+1];
extern double ws [KMAX][JMAXP+1][IMAXP+1];
extern double qs [KMAX][JMAXP+1][IMAXP+1];
extern double rho_i [KMAX][JMAXP+1][IMAXP+1];
extern double square [KMAX][JMAXP+1][IMAXP+1];
extern double forcing[KMAX][JMAXP+1][IMAXP+1][5];
extern double u [KMAX][JMAXP+1][IMAXP+1][5];
extern double rhs [KMAX][JMAXP+1][IMAXP+1][5];
#pragma omp end declare target
/* common /work_1d/ */
extern double cuf[PROBLEM_SIZE+1];
extern double q [PROBLEM_SIZE+1];
extern double ue [PROBLEM_SIZE+1][5];
extern double buf[PROBLEM_SIZE+1][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
#define t_total 1
#define t_rhsx 2
#define t_rhsy 3
#define t_rhsz 4
#define t_rhs 5
#define t_xsolve 6
#define t_ysolve 7
#define t_zsolve 8
#define t_rdis1 9
#define t_rdis2 10
#define t_add 11
#define t_last 11
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *classT, int *verified);
|
byrow.h | void byrow()
{
int i,j,k;
int diag,row,col;
char a,b;
int _max,t;
#pragma omp parallel for
for(i=0; i<=N-2; i++){
S[i][i] = 0;
S[i][i+1] = 0;
}
S[N-1][N-1] = 0;
for(i=N-3; i>=0; i--){
for(j=i+2; j<=N-1; j++)
S[i][j] = S[i+1][j-1] + can_pair(RNA, i, j);
for(k=i; k <=N-2; k++)
#pragma omp parallel for private(j)
for(j=k+1; j <=N-1; j++)
S[i][j] = max(S[i][j], S[i][k] + S[k+1][j]);
}
}
|
smap.c | #include <stdbool.h>
#include <threads.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#define CIRCA_STATIC
#include "../circa_mac.h"
#include "../circa_err.h"
#include "../circa_cmp.h"
#include "../circa_hash.h"
#define K int
#define V int
#include "../circa_map.h"
#include "../circa_smap.h"
#undef V
#undef K
#define SIZ 1000000
/*
 * Stress test: four OpenMP threads each insert a disjoint block of SIZ
 * consecutive keys (value = thread id) into one shared smap
 * (presumably a synchronized map - the construct under test).
 */
int main() {
  smap_int_int m;
  smap_int_int_alloc(&m, 4);
  #pragma omp parallel for num_threads(4)
  for (int t = 0; t < 4; t++) {
    int lo = t * SIZ;
    int hi = lo + SIZ;
    for (int key = lo; key < hi; key++) {
      smap_int_int_set(&m, &key, &t);
    }
  }
  smap_int_int_free(&m);
  return 0;
}
|
test-mhp.c | int x;
/*
 * Writes to the shared variable x separated by OpenMP barriers.
 * NOTE(review): presumably a fixture for a may-happen-in-parallel (MHP)
 * analysis - the labels and barrier placement, not the values, are the
 * point; confirm before modifying.
 */
int main() {
  x = 0;
  #pragma omp parallel
  {
    x = 1;
    #pragma omp barrier
    j: x = 2;   /* labeled so the analysis can name this program point */
    #pragma omp barrier
    x = 3;
    #pragma omp barrier
    g: x = 4;   /* distinct from the label 'g' in foo(): labels are function-scoped */
    foo();      /* executed by every thread in the parallel region */
  }
}
/*
 * Companion to main() above: exercises a barrier inside a while loop, a
 * block that was once a parallel region (pragma commented out), and a
 * backward goto - corner cases for barrier-matching analyses.
 */
void foo () {
  w: while (x > 0) {
    if (x == 3) {
      #pragma omp barrier
      x = 11;
      //#pragma omp parallel
      {
        x = 12;
        #pragma omp barrier
        x = 13;
      }
      goto g;   /* jumps to foo's own label 'g' below, not the one in main() */
    }
  }
  g : x = 9;
}
|
GB_unop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_bool_bool
// op(A') function: GB_unop_tran__lnot_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = !z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__lnot_bool_bool
(
    bool *Cx,               // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply z = !a elementwise.  Reading Ax [p] before writing Cx [p] at
    // the same index keeps this correct when Cx aliases Ax.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = !Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__lnot_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by textual inclusion; the GB_*
    // macros defined earlier in this file specialize it for bool/lnot.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
14.c | /* Написать параллельную программу возведения числа 210 в квадрат
без операции
умножения.
Пояснение: Квадрат натурального числа N равен сумме первых N нечетных чисел. Например,
3^2 = 9 это 1+3+5=9; 5^2 = 25 это 1+3+5+7+9=25; */
#include <stdio.h>
#include <omp.h>
/*
 * Squares number = 210 without multiplication: N^2 is the sum of the
 * first N odd numbers (1 + 3 + ... + (2N-1)).  Prints 44100.
 *
 * Fix: the original wrapped the whole loop body in "#pragma omp critical"
 * to update a shared running counter 'add', which serialized every
 * iteration and made the reduction pointless.  Each iteration can compute
 * its own odd term as i + i + 1 (addition only, honoring the exercise's
 * no-multiplication rule), so the loop now parallelizes cleanly with just
 * the reduction; the printed result is unchanged.
 */
int main(int argc, char *argv[])
{
    int number = 210;
    long result = 0;

    #pragma omp parallel for reduction(+: result)
    for (int i = 0; i < number; i++)
    {
        /* i-th odd number, formed without multiplication */
        result += i + i + 1;
    }
    printf("%ld", result);
    return 0;
}
irbuilder_unroll_unroll_partial_heuristic.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_unroll_partial_heuristic(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 8
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 8
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP7:.+]] = icmp eq i32 %[[OMP_FLOOR0_IV]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP8:.+]] = select i1 %[[TMP7]], i32 %[[TMP4]], i32 8
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP8]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP9:.+]] = mul nuw i32 8, %[[OMP_FLOOR0_IV]]
// CHECK-NEXT: %[[TMP10:.+]] = add nuw i32 %[[TMP9]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP10]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP11:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP12]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP11]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP13:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP14:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP15:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP15]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP14]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP16:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP13]], %[[TMP16]]
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP19]]
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]], !llvm.loop ![[LOOP6:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Exercises OpenMP IRBuilder lowering of two stacked "#pragma omp unroll
// partial" directives with no explicit factor: the outer directive applies
// to the loop produced by the inner one, and the CHECK lines above pin the
// heuristically chosen partial factor of 8 (udiv/urem by 8 and the
// "llvm.loop.unroll.count" metadata).  Do not change the loop without
// regenerating the CHECK lines.
void unroll_unroll_partial_heuristic(float *a, float *b, float *c, float *d) {
#pragma omp unroll partial
#pragma omp unroll partial
  for (int i = 0; i < 2; i++) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 8}
// CHECK: ![[LOOP6]] = distinct !{![[LOOP6]], ![[LOOPPROP4]]}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct 3x3 stride-1 convolution (scalar reference path).
// bottom_blob : input feature maps, w x h x inch
// top_blob    : output feature maps, outw x outh x outch (sized by the caller)
// _kernel     : flat weight array, 9 floats per (output channel, input channel) pair
// _bias       : optional per-output-channel bias; may be empty
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Output channels are independent of one another -> parallelize over outch.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole channel with its bias (0 when no bias was provided).
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // Accumulate the contribution of each input channel in turn.
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw; // second output row of the 2-row unroll

            const float* img0 = bottom_blob.channel(q);

            // 3x3 weights for this (p, q) channel pair.
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Four consecutive input rows: r0..r2 feed output row i,
            // r1..r3 feed output row i+1 (rows r1/r2 are shared).
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            int i = 0;
            // Main loop: produce two output rows per iteration so the loads
            // of input rows r1 and r2 are reused between them.
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;  // accumulator for output row i
                    float sum2 = 0; // accumulator for output row i + 1

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    // Accumulate on top of the bias / previous input channels.
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-pixel right border, then one extra input row:
                // the next iteration starts two input rows further down.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                // Each pointer jumps over the row the other one just wrote.
                outptr += outw;
                outptr2 += outw;
            }
            // Tail: the last output row when outh is odd (single-row path).
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                // Skip the 2-pixel right border to reach the next row start.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
// Transform 3x3 kernels into the Winograd F(2,3) domain (4x4 coefficient
// tiles) and interleave them into the layout expected by the SSE dot kernel.
// kernel     : raw weights, 9 floats per (outch, inch) pair
// kernel_tm2 : destination for the interleaved transformed weights
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
    // Intermediate: one 4x4 transformed tile per (outch, inch) pair.
    Mat kernel_tm(4 * 4, inch, outch);

    // G  (the F(2,3) kernel transform matrix)
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel: two passes of the two-sided transform.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h — first pass: combine the three kernel rows with ktm.
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U — second pass: apply ktm on the other side, yielding 4x4.
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 16-inch-outch
    // dst = inch-16-outch
#if __SSE2__
    // Output channels are packed in groups of 8, then groups of 4, then
    // singles; the channel index formula below mirrors this layout.
    kernel_tm2.create(8 * inch, 16, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm2.create(inch, 16, outch);
#endif

    int q = 0;
#if __SSE2__
    // Pack 8 consecutive output channels: for each of the 16 coefficients,
    // lay out that coefficient for channels q..q+7, per input channel.
    for (; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm2.channel(q / 8);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // Pack the next group of 4 output channels the same way.
    for (; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
#endif
    // Remaining output channels one at a time.
    for (; q < outch; q++)
    {
#if __SSE2__
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        Mat g0 = kernel_tm2.channel(q);
#endif

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                const float* k00 = kernel_tm.channel(q).row(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution driver: pad the input to whole
// 2x2 output tiles, transform the input, run the batched dot product,
// transform the result back and crop it to the requested output size.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    const int inch = bottom_blob.c;
    const int outch = top_blob.c;

    // Round the output up to a multiple of 2 (one F(2,3) tile yields 2x2
    // outputs); the padded input must be 2 pixels larger on each axis.
    const int outw_aligned = (top_blob.w + 1) / 2 * 2;
    const int outh_aligned = (top_blob.h + 1) / 2 * 2;
    const int w_padded = outw_aligned + 2;
    const int h_padded = outh_aligned + 2;

    Mat bottom_blob_bordered = bottom_blob;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h_padded - bottom_blob.h, 0, w_padded - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        const int tiles = (outw_aligned / 2) * (outh_aligned / 2);
        bottom_blob_tm.create(tiles, 16, inch, 4u, opt.workspace_allocator);
        conv3x3s1_winograd23_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat(); // release the padded copy early
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw_aligned == top_blob.w && outh_aligned == top_blob.h)
        top_blob_bordered = top_blob;
    else
        top_blob_bordered.create(outw_aligned, outh_aligned, outch, 4u, opt.workspace_allocator);

    conv3x3s1_winograd23_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
    // END transform output

    // Crop the alignment padding off the result.
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Transform 3x3 kernels into the Winograd F(4,3) domain (6x6 coefficient
// tiles) and interleave them into the layout expected by the SSE dot kernel.
// kernel     : raw weights, 9 floats per (outch, inch) pair
// kernel_tm2 : destination for the interleaved transformed weights
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
    // Intermediate: one 6x6 transformed tile per (outch, inch) pair.
    Mat kernel_tm(6 * 6, inch, outch);

    // G  (the F(4,3) kernel transform matrix)
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel: two passes of the two-sided transform.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h — first pass: combine the three kernel rows with ktm.
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U — second pass: apply ktm on the other side, yielding 6x6.
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = inch-36-outch
#if __SSE2__
    // Output channels are packed in groups of 8, then groups of 4, then
    // singles; the channel index formula below mirrors this layout.
    kernel_tm2.create(8 * inch, 36, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm2.create(inch, 36, outch);
#endif

    int q = 0;
#if __SSE2__
    // Pack 8 consecutive output channels: for each of the 36 coefficients,
    // lay out that coefficient for channels q..q+7, per input channel.
    for (; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm2.channel(q / 8);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // Pack the next group of 4 output channels the same way.
    for (; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const float* k00 = kernel_tm.channel(q + i).row(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
#endif
    // Remaining output channels one at a time.
    for (; q < outch; q++)
    {
#if __SSE2__
        Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        Mat g0 = kernel_tm2.channel(q);
#endif

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p < inch; p++)
            {
                const float* k00 = kernel_tm.channel(q).row(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
}
// Winograd F(4,3) 3x3 stride-1 convolution driver: pad the input to whole
// 4x4 output tiles, transform the input, run the batched dot product,
// transform the result back and crop it to the requested output size.
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    const int inch = bottom_blob.c;
    const int outch = top_blob.c;

    // Round the output up to a multiple of 4 (one F(4,3) tile yields 4x4
    // outputs); the padded input must be 2 pixels larger on each axis.
    const int outw_aligned = (top_blob.w + 3) / 4 * 4;
    const int outh_aligned = (top_blob.h + 3) / 4 * 4;
    const int w_padded = outw_aligned + 2;
    const int h_padded = outh_aligned + 2;

    Mat bottom_blob_bordered = bottom_blob;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h_padded - bottom_blob.h, 0, w_padded - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        const int tiles = (outw_aligned / 4) * (outh_aligned / 4);
        bottom_blob_tm.create(tiles, 36, inch, 4u, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat(); // release the padded copy early
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw_aligned == top_blob.w && outh_aligned == top_blob.h)
        top_blob_bordered = top_blob;
    else
        top_blob_bordered.create(outw_aligned, outh_aligned, outch, 4u, opt.workspace_allocator);

    conv3x3s1_winograd43_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
    // END transform output

    // Crop the alignment padding off the result.
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Direct 3x3 stride-2 convolution (scalar reference path).
// bottom_blob : input feature maps, w x h x inch
// top_blob    : output feature maps, outw x outh x outch (sized by the caller)
// _kernel     : flat weight array, 9 floats per (output channel, input channel) pair
// _bias       : optional per-output-channel bias; may be empty
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After a row of outputs the read pointers have advanced 2*outw pixels;
    // (w - 2*outw) finishes the current input row and the extra +w skips one
    // whole row, giving the vertical stride of 2.
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Output channels are independent of one another -> parallelize over outch.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole channel with its bias (0 when no bias was provided).
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // Accumulate the contribution of each input channel in turn.
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);

            // 3x3 weights for this (p, q) channel pair.
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // Three consecutive input rows feeding one output row.
            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;

            const float* k0 = kernel0;     // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Accumulate on top of the bias / previous input channels.
                    *outptr += sum;

                    // Horizontal stride of 2.
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                // Jump to the start of the next stride-2 input row triple.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
quicksort.h | // -*- C++ -*-
// Copyright (C) 2007-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/quicksort.h
* @brief Implementation of an unbalanced parallel quicksort (in-place).
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1
#include <parallel/parallel.h>
#include <parallel/partition.h>
namespace __gnu_parallel
{
/** @brief Unbalanced quicksort divide step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __pivot_rank Desired rank of the pivot.
* @param __num_samples Choose pivot from that many samples.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
  typename std::iterator_traits<_RAIter>::difference_type
  __parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
                            _Compare __comp, typename std::iterator_traits
                            <_RAIter>::difference_type __pivot_rank,
                            typename std::iterator_traits
                            <_RAIter>::difference_type
                            __num_samples, _ThreadIndex __num_threads)
  {
    typedef std::iterator_traits<_RAIter> _TraitsType;
    typedef typename _TraitsType::value_type _ValueType;
    typedef typename _TraitsType::difference_type _DifferenceType;

    _DifferenceType __n = __end - __begin;
    // Cannot draw more samples than there are elements.
    __num_samples = std::min(__num_samples, __n);

    // Allocate uninitialized, to avoid default constructor.
    _ValueType* __samples = static_cast<_ValueType*>
      (::operator new(__num_samples * sizeof(_ValueType)));

    // Copy-construct evenly spaced samples from the input range
    // (placement new into the raw buffer).
    for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
      {
        const unsigned long long __index = static_cast<unsigned long long>
          (__s) * __n / __num_samples;
        ::new(&(__samples[__s])) _ValueType(__begin[__index]);
      }

    // Sort the samples so that the one whose sample-rank is proportional
    // to the desired global pivot rank can be selected directly.
    __gnu_sequential::sort(__samples, __samples + __num_samples, __comp);

    _ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];

    // Partition the full range in parallel around the chosen pivot.
    __gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
      __pred(__comp, __pivot);
    _DifferenceType __split = __parallel_partition(__begin, __end,
                                                   __pred, __num_threads);

    // Destroy the placement-constructed samples and release the raw storage.
    for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
      __samples[__s].~_ValueType();
    ::operator delete(__samples);

    return __split;
  }
/** @brief Unbalanced quicksort conquer step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
  void
  __parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
                             _Compare __comp,
                             _ThreadIndex __num_threads)
  {
    typedef std::iterator_traits<_RAIter> _TraitsType;
    typedef typename _TraitsType::difference_type _DifferenceType;

    // A single thread falls back to the sequential sort.
    if (__num_threads <= 1)
      {
        __gnu_sequential::sort(__begin, __end, __comp);
        return;
      }

    _DifferenceType __n = __end - __begin, __pivot_rank;

    if (__n <= 1)
      return;

    // When the thread count is odd, the left subrange gets the extra thread.
    _ThreadIndex __num_threads_left;

    if ((__num_threads % 2) == 1)
      __num_threads_left = __num_threads / 2 + 1;
    else
      __num_threads_left = __num_threads / 2;

    // Aim the pivot so the element split is proportional to the thread split.
    __pivot_rank = __n * __num_threads_left / __num_threads;

    _DifferenceType __split = __parallel_sort_qs_divide
      (__begin, __end, __comp, __pivot_rank,
       _Settings::get().sort_qs_num_samples_preset, __num_threads);

    // Recurse on both halves concurrently; each section gets its share of
    // the thread budget.
#pragma omp parallel sections num_threads(2)
    {
#pragma omp section
      __parallel_sort_qs_conquer(__begin, __begin + __split,
                                 __comp, __num_threads_left);
#pragma omp section
      __parallel_sort_qs_conquer(__begin + __split, __end,
                                 __comp, __num_threads - __num_threads_left);
    }
  }
/** @brief Unbalanced quicksort main call.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator of the input sequence.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
_GLIBCXX_CALL(__n)
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
// At least one element per processor.
if (__num_threads > __n)
__num_threads = static_cast<_ThreadIndex>(__n);
__parallel_sort_qs_conquer(
__begin, __begin + __n, __comp, __num_threads);
}
} //namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.